2 Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
21 #include "atmos_decoder.h"
24 #include "audio_buffers.h"
25 #include "content_audio.h"
26 #include "dcp_content.h"
27 #include "dcpomatic_log.h"
30 #include "raw_image_proxy.h"
33 #include "render_text.h"
35 #include "content_video.h"
36 #include "player_video.h"
37 #include "frame_rate_change.h"
38 #include "audio_processor.h"
40 #include "referenced_reel_asset.h"
41 #include "decoder_factory.h"
43 #include "video_decoder.h"
44 #include "audio_decoder.h"
45 #include "text_content.h"
46 #include "text_decoder.h"
47 #include "ffmpeg_content.h"
48 #include "audio_content.h"
49 #include "dcp_decoder.h"
50 #include "image_decoder.h"
51 #include "compose.hpp"
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <boost/foreach.hpp>
76 using boost::shared_ptr;
77 using boost::weak_ptr;
78 using boost::dynamic_pointer_cast;
79 using boost::optional;
80 using boost::scoped_ptr;
81 using namespace dcpomatic;
/* Identifiers passed with the Change signal so listeners can tell which
   aspect of the Player's output has (or is about to) change. */
83 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
84 int const PlayerProperty::PLAYLIST = 701;
85 int const PlayerProperty::FILM_CONTAINER = 702;
86 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
87 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
88 int const PlayerProperty::PLAYBACK_LENGTH = 705;
/* Construct a Player over @film's own playlist.
   All optional behaviours (ignoring video/audio/text, burning subtitles,
   playing referenced DCP content) default to off.
   NOTE(review): some initialiser-list and body lines are missing from this
   extract — confirm against the full source. */
90 Player::Player (shared_ptr<const Film> film)
93 , _ignore_video (false)
94 , _ignore_audio (false)
95 , _ignore_text (false)
96 , _always_burn_open_subtitles (false)
98 , _tolerant (film->tolerant())
99 , _play_referenced (false)
/* The merger must run at the film's audio rate so pushed blocks line up. */
100 , _audio_merger (_film->audio_frame_rate())
/* Construct a Player over an explicit playlist rather than the film's own
   (used e.g. when previewing a subset of content). */
106 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
108 , _playlist (playlist_)
110 , _ignore_video (false)
111 , _ignore_audio (false)
112 , _ignore_text (false)
113 , _always_burn_open_subtitles (false)
115 , _tolerant (film->tolerant())
116 , _play_referenced (false)
117 , _audio_merger (_film->audio_frame_rate())
/* Shared construction: subscribe to film/playlist change signals, size the
   output container and do an initial accurate seek to time zero. */
126 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
127 /* The butler must hear about this first, so since we are proxying this through to the butler we must
/* at_front ensures our handler runs before other subscribers (the butler). */
130 _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
131 _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
132 set_video_container_size (_film->frame_size ());
/* Prime the audio processor state as if the film's processor just changed. */
134 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
/* Accurate seek to the start so decoder state is consistent before pass(). */
137 seek (DCPTime (), true);
/* Rebuild the Piece list (content + decoder pairs) under the state mutex. */
146 Player::setup_pieces ()
148 boost::mutex::scoped_lock lm (_mutex);
149 setup_pieces_unlocked ();
/* @return true if this content has video which is marked for use. */
154 have_video (shared_ptr<const Content> content)
156 return static_cast<bool>(content->video) && content->video->use();
/* @return true if this content has any audio at all. */
160 have_audio (shared_ptr<const Content> content)
162 return static_cast<bool>(content->audio);
/* Rebuild _pieces from the playlist: create (or re-use) a decoder per
   content item, wire decoder outputs to our handlers, and reset the
   black/silence fillers and last-emitted times.  Caller must hold _mutex.
   NOTE(review): several lines (continues, closing braces, signal-connect
   terminators) are missing from this extract — confirm against the full
   source before relying on exact control flow. */
166 Player::setup_pieces_unlocked ()
/* Playback length comes from the explicit playlist if we have one. */
168 _playback_length = _playlist ? _playlist->length(_film) : _film->length();
/* Keep the old pieces so existing decoders can be re-used below. */
170 list<shared_ptr<Piece> > old_pieces = _pieces;
174 _shuffler = new Shuffler();
175 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
177 BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
/* Skip content whose files have gone missing. */
179 if (!i->paths_valid ()) {
183 if (_ignore_video && _ignore_audio && i->text.empty()) {
184 /* We're only interested in text and this content has none */
/* Re-use the decoder from a previous setup for the same content, if any. */
188 shared_ptr<Decoder> old_decoder;
189 BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
190 if (j->content == i) {
191 old_decoder = j->decoder;
196 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
197 DCPOMATIC_ASSERT (decoder);
199 FrameRateChange frc (_film, i);
201 if (decoder->video && _ignore_video) {
202 decoder->video->set_ignore (true);
205 if (decoder->audio && _ignore_audio) {
206 decoder->audio->set_ignore (true);
/* NOTE(review): presumably guarded by _ignore_text in the full source. */
210 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
211 i->set_ignore (true);
/* DCP content gets extra configuration for referenced-asset playback. */
215 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
217 dcp->set_decode_referenced (_play_referenced);
218 if (_play_referenced) {
219 dcp->set_forced_reduction (_dcp_decode_reduction);
223 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
224 _pieces.push_back (piece);
226 if (decoder->video) {
227 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
228 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
229 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
/* 2D (or full-frame 3D) video can go straight to Player::video. */
231 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
235 if (decoder->audio) {
236 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Wire each text decoder's start/stop signals to our subtitle handlers. */
239 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
241 while (j != decoder->text.end()) {
242 (*j)->BitmapStart.connect (
243 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
245 (*j)->PlainStart.connect (
246 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
249 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
255 if (decoder->atmos) {
256 decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
/* Track per-audio-stream push positions so pass() knows how far it may
   safely pull merged audio. */
260 _stream_states.clear ();
261 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
262 if (i->content->audio) {
263 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
264 _stream_states[j] = StreamState (i, i->content->position ());
/* Empty trackers describing where there is no video (black to fill) and
   no audio (silence to fill) over the playback length. */
269 _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
270 _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
272 _last_video_time = DCPTime ();
273 _last_video_eyes = EYES_BOTH;
274 _last_audio_time = DCPTime ();
/* Handle a change to some content in the playlist, suspending work while
   the change is pending and rebuilding pieces when it completes.  The
   Change signal is re-emitted to our own listeners in every case.
   NOTE(review): the suspend/resume/setup calls appear to be missing from
   this extract — confirm against the full source. */
278 Player::playlist_content_change (ChangeType type, int property, bool frequent)
280 if (type == CHANGE_TYPE_PENDING) {
281 /* The player content is probably about to change, so we can't carry on
282 until that has happened and we've rebuilt our pieces. Stop pass()
283 and seek() from working until then.
286 } else if (type == CHANGE_TYPE_DONE) {
287 /* A change in our content has gone through. Re-build our pieces. */
290 } else if (type == CHANGE_TYPE_CANCELLED) {
/* Proxy the change through to our own listeners. */
294 Change (type, property, frequent);
/* Set the size of the container that output video will be fitted to,
   emitting PENDING / CANCELLED / DONE change notifications around the
   update.  Rebuilds the cached black frame at the new size. */
298 Player::set_video_container_size (dcp::Size s)
300 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
303 boost::mutex::scoped_lock lm (_mutex);
/* No-op: cancel the pending notification and return early. */
305 if (s == _video_container_size) {
307 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
311 _video_container_size = s;
/* Re-make the black filler frame at the new container size. */
313 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
314 _black_image->make_black ();
317 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Handle a change to the playlist itself; re-emit as a PLAYLIST change.
   NOTE(review): the setup_pieces() call in the DONE branch appears to be
   missing from this extract. */
321 Player::playlist_change (ChangeType type)
323 if (type == CHANGE_TYPE_DONE) {
326 Change (type, PlayerProperty::PLAYLIST, false);
/* React to changes in Film properties that affect our output, translating
   them into the corresponding PlayerProperty change notifications and
   updating internal state where needed. */
330 Player::film_change (ChangeType type, Film::Property p)
332 /* Here we should notice Film properties that affect our output, and
333 alert listeners that our output now would be different to how it was
334 last time we were run.
337 if (p == Film::CONTAINER) {
338 Change (type, PlayerProperty::FILM_CONTAINER, false);
339 } else if (p == Film::VIDEO_FRAME_RATE) {
340 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
341 so we need new pieces here.
/* NOTE(review): the setup_pieces() call appears to be missing here. */
343 if (type == CHANGE_TYPE_DONE) {
346 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
347 } else if (p == Film::AUDIO_PROCESSOR) {
/* Clone the film's processor so we have our own instance at our rate. */
348 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
349 boost::mutex::scoped_lock lm (_mutex);
350 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
352 } else if (p == Film::AUDIO_CHANNELS) {
/* Channel count changed: pending merged audio is no longer valid. */
353 if (type == CHANGE_TYPE_DONE) {
354 boost::mutex::scoped_lock lm (_mutex);
355 _audio_merger.clear ();
/* @return a PlayerVideo wrapping the cached black image, used to fill
   gaps where no content video is present.
   NOTE(review): several constructor-argument lines are missing from this
   extract — confirm the full argument list against the original. */
360 shared_ptr<PlayerVideo>
361 Player::black_player_video_frame (Eyes eyes) const
363 return shared_ptr<PlayerVideo> (
365 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
368 _video_container_size,
369 _video_container_size,
372 PresetColourConversion::all().front().conversion,
/* No originating content or frame for a synthetic black frame. */
374 boost::weak_ptr<Content>(),
375 boost::optional<Frame>(),
/* Convert a DCP time to a content video frame index for @piece, clamping
   to the content's trimmed extent. */
382 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
384 DCPTime s = t - piece->content->position ();
385 s = min (piece->content->length_after_trim(_film), s);
386 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
388 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
389 then convert that ContentTime to frames at the content's rate. However this fails for
390 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
391 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
393 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
395 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: content frame index -> DCP time. */
399 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
401 /* See comment in dcp_to_content_video */
402 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
403 return d + piece->content->position();
/* Convert a DCP time to a resampled-audio frame index for @piece. */
407 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
409 DCPTime s = t - piece->content->position ();
410 s = min (piece->content->length_after_trim(_film), s);
411 /* See notes in dcp_to_content_video */
412 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: resampled frame index -> DCP time. */
416 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
418 /* See comment in dcp_to_content_video */
419 return DCPTime::from_frames (f, _film->audio_frame_rate())
420 - DCPTime (piece->content->trim_start(), piece->frc)
421 + piece->content->position();
/* Convert a DCP time to a ContentTime within @piece, clamped to >= 0. */
425 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
427 DCPTime s = t - piece->content->position ();
428 s = min (piece->content->length_after_trim(_film), s);
429 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Convert a ContentTime within @piece to a DCP time, clamped to >= 0. */
433 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
435 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by every piece of text content in the playlist.
   Duplicates are not removed (see the XXX below). */
438 list<shared_ptr<Font> >
439 Player::get_subtitle_fonts ()
441 boost::mutex::scoped_lock lm (_mutex);
443 list<shared_ptr<Font> > fonts;
444 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
445 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
446 /* XXX: things may go wrong if there are duplicate font IDs
447 with different font files.
449 list<shared_ptr<Font> > f = j->fonts ();
450 copy (f.begin(), f.end(), back_inserter (fonts));
457 /** Set this player never to produce any video data */
459 Player::set_ignore_video ()
461 boost::mutex::scoped_lock lm (_mutex);
462 _ignore_video = true;
/* Rebuild pieces so decoders pick up the new ignore flag. */
463 setup_pieces_unlocked ();
/* Set this player never to produce any audio data. */
467 Player::set_ignore_audio ()
469 boost::mutex::scoped_lock lm (_mutex);
470 _ignore_audio = true;
471 setup_pieces_unlocked ();
/* Set this player never to produce any text (subtitle/caption) data.
   NOTE(review): the `_ignore_text = true` assignment appears to be missing
   from this extract — confirm against the full source. */
475 Player::set_ignore_text ()
477 boost::mutex::scoped_lock lm (_mutex);
479 setup_pieces_unlocked ();
482 /** Set the player to always burn open texts into the image regardless of the content settings */
484 Player::set_always_burn_open_subtitles ()
486 boost::mutex::scoped_lock lm (_mutex);
487 _always_burn_open_subtitles = true;
490 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function signature (set_fast) and `_fast = true`
   assignment appear to be missing from this extract. */
494 boost::mutex::scoped_lock lm (_mutex);
496 setup_pieces_unlocked ();
/* Make the player decode content referenced by DCPs rather than skipping it. */
500 Player::set_play_referenced ()
502 boost::mutex::scoped_lock lm (_mutex);
503 _play_referenced = true;
504 setup_pieces_unlocked ();
/* Append @r to @a as a ReferencedReelAsset, applying the given per-reel
   trims to its entry point and duration.  Assets trimmed to nothing are
   not added.  @ffr is the film's video frame rate. */
508 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
510 DCPOMATIC_ASSERT (r);
/* Push the entry point forward by the start trim... */
511 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
/* ...and shorten the duration by both trims. */
512 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
513 if (r->actual_duration() > 0) {
515 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/* @return the reel assets from DCP content which is marked to be referenced
   (rather than re-encoded), each trimmed and positioned on the DCP timeline.
   NOTE(review): some loop/brace/continue lines are missing from this
   extract — confirm control flow against the full source. */
520 list<ReferencedReelAsset>
521 Player::get_reel_assets ()
523 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
525 list<ReferencedReelAsset> a;
527 BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
/* Only DCP content can be referenced. */
528 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
533 scoped_ptr<DCPDecoder> decoder;
535 decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
540 DCPOMATIC_ASSERT (j->video_frame_rate ());
541 double const cfr = j->video_frame_rate().get();
/* Total trims expressed in content frames. */
542 Frame const trim_start = j->trim_start().frames_round (cfr);
543 Frame const trim_end = j->trim_end().frames_round (cfr);
544 int const ffr = _film->video_frame_rate ();
546 /* position in the asset from the start */
547 int64_t offset_from_start = 0;
548 /* position in the asset from the end */
549 int64_t offset_from_end = 0;
/* First pass: total duration, so offset_from_end can count down. */
550 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
551 /* Assume that main picture duration is the length of the reel */
552 offset_from_end += k->main_picture()->actual_duration();
555 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
557 /* Assume that main picture duration is the length of the reel */
558 int64_t const reel_duration = k->main_picture()->actual_duration();
560 /* See doc/design/trim_reels.svg */
/* How much of the overall trim falls within this particular reel. */
561 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
562 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
564 DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
565 if (j->reference_video ()) {
566 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
569 if (j->reference_audio ()) {
570 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
573 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
574 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
577 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
578 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
579 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
/* Advance to the next reel. */
583 offset_from_start += reel_duration;
584 offset_from_end -= reel_duration;
/* One step of the playback engine (Player::pass): find whichever source —
   a content decoder, the black filler or the silence filler — is furthest
   behind, make it emit some data, then flush any merged audio that is now
   complete and any delayed video that is due.
   NOTE(review): the function signature, several returns, braces and the
   switch over what-to-do appear to be missing from this extract — confirm
   against the full source. */
594 boost::mutex::scoped_lock lm (_mutex);
597 /* We can't pass in this state */
598 LOG_DEBUG_PLAYER_NC ("Player is suspended");
602 if (_playback_length == DCPTime()) {
603 /* Special; just give one black frame */
604 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
608 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
610 shared_ptr<Piece> earliest_content;
611 optional<DCPTime> earliest_time;
613 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Where this piece's decoder currently is, on the DCP timeline. */
618 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
619 if (t > i->content->end(_film)) {
623 /* Given two choices at the same time, pick the one with texts so we see it before
626 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
628 earliest_content = i;
642 if (earliest_content) {
/* The black/silence fillers may be further behind than any content. */
646 if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
647 earliest_time = _black.position ();
651 if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
652 earliest_time = _silent.position ();
/* Case: pass the chosen content decoder. */
659 LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
660 earliest_content->done = earliest_content->decoder->pass ();
661 shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
662 if (dcp && !_play_referenced && dcp->reference_audio()) {
663 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
664 to `hide' the fact that no audio was emitted during the referenced DCP (though
665 we need to behave as though it was).
667 _last_audio_time = dcp->end (_film);
/* Case: emit a black frame to fill a video gap. */
672 LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
673 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
674 _black.set_position (_black.position() + one_video_frame());
/* Case: emit silence to fill an audio gap. */
678 LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
679 DCPTimePeriod period (_silent.period_at_position());
680 if (_last_audio_time) {
681 /* Sometimes the thing that happened last finishes fractionally before
682 or after this silence. Bodge the start time of the silence to fix it.
683 I think this is nothing to worry about since we will just add or
684 remove a little silence at the end of some content.
686 int64_t const error = labs(period.from.get() - _last_audio_time->get());
687 /* Let's not worry about less than a frame at 24fps */
688 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
689 if (error >= too_much_error) {
690 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
692 DCPOMATIC_ASSERT (error < too_much_error);
693 period.from = *_last_audio_time;
/* Cap each chunk of silence at one video frame's worth. */
695 if (period.duration() > one_video_frame()) {
696 period.to = period.from + one_video_frame();
699 _silent.set_position (period.to);
707 /* Emit any audio that is ready */
709 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
710 of our streams, or the position of the _silent.
712 DCPTime pull_to = _playback_length;
713 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
714 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
715 pull_to = i->second.last_push_end;
718 if (!_silent.done() && _silent.position() < pull_to) {
719 pull_to = _silent.position();
722 LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
723 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
724 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
725 if (_last_audio_time && i->second < *_last_audio_time) {
726 /* This new data comes before the last we emitted (or the last seek); discard it */
727 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
732 } else if (_last_audio_time && i->second > *_last_audio_time) {
733 /* There's a gap between this data and the last we emitted; fill with silence */
734 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
737 emit_audio (i->first, i->second);
/* Flush delayed video frames which are now due. */
742 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
743 do_emit_video(i->first, i->second);
750 /** @return Open subtitles for the frame at the given time, converted to images */
751 optional<PositionImage>
752 Player::open_subtitles_for_frame (DCPTime time) const
754 list<PositionImage> captions;
755 int const vfr = _film->video_frame_rate();
/* Get the active open subtitles that should be burnt into this one-frame period. */
759 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
762 /* Bitmap subtitles */
763 BOOST_FOREACH (BitmapText i, j.bitmap) {
768 /* i.image will already have been scaled to fit _video_container_size */
/* Rectangle coordinates are proportions of the container; convert to pixels. */
769 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
775 lrint (_video_container_size.width * i.rectangle.x),
776 lrint (_video_container_size.height * i.rectangle.y)
782 /* String subtitles (rendered to an image) */
783 if (!j.string.empty ()) {
784 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
785 copy (s.begin(), s.end(), back_inserter (captions));
789 if (captions.empty ()) {
790 return optional<PositionImage> ();
/* Composite everything into a single positioned image. */
793 return merge (captions);
/* Handle a video frame arriving from a decoder: discard if out of range,
   fill any gap since the last emitted frame (with the previous frame or
   black, handling 3D eye interleaving), then emit this frame (repeated if
   the frame-rate change requires it).
   NOTE(review): returns/braces and some PlayerVideo constructor arguments
   are missing from this extract — confirm against the full source. */
797 Player::video (weak_ptr<Piece> wp, ContentVideo video)
799 shared_ptr<Piece> piece = wp.lock ();
804 if (!piece->content->video->use()) {
/* Drop every other frame when the DCP rate requires skipping. */
808 FrameRateChange frc (_film, piece->content);
809 if (frc.skip && (video.frame % 2) == 1) {
813 /* Time of the first frame we will emit */
814 DCPTime const time = content_video_to_dcp (piece, video.frame);
815 LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
817 /* Discard if it's before the content's period or the last accurate seek. We can't discard
818 if it's after the content's period here as in that case we still need to fill any gap between
819 `now' and the end of the content's period.
821 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
825 /* Fill gaps that we discover now that we have some video which needs to be emitted.
826 This is where we need to fill to.
828 DCPTime fill_to = min (time, piece->content->end(_film));
830 if (_last_video_time) {
831 DCPTime fill_from = max (*_last_video_time, piece->content->position());
833 /* Fill if we have more than half a frame to do */
834 if ((fill_to - fill_from) > one_video_frame() / 2) {
835 LastVideoMap::const_iterator last = _last_video.find (wp);
836 if (_film->three_d()) {
/* 3D: fill eye-by-eye up to the eye of the incoming frame. */
837 Eyes fill_to_eyes = video.eyes;
838 if (fill_to_eyes == EYES_BOTH) {
839 fill_to_eyes = EYES_LEFT;
841 if (fill_to == piece->content->end(_film)) {
842 /* Don't fill after the end of the content */
843 fill_to_eyes = EYES_LEFT;
845 DCPTime j = fill_from;
846 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
847 if (eyes == EYES_BOTH) {
850 while (j < fill_to || eyes != fill_to_eyes) {
851 if (last != _last_video.end()) {
852 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
/* Repeat the last frame, re-labelled with the eye we are filling. */
853 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
854 copy->set_eyes (eyes);
855 emit_video (copy, j);
857 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
858 emit_video (black_player_video_frame(eyes), j);
/* Time only advances after the right eye has been emitted. */
860 if (eyes == EYES_RIGHT) {
861 j += one_video_frame();
863 eyes = increment_eyes (eyes);
/* 2D: fill with the last frame if we have one, else black. */
866 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
867 if (last != _last_video.end()) {
868 emit_video (last->second, j);
870 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this frame and remember it for gap-filling. */
877 _last_video[wp].reset (
880 piece->content->video->crop (),
881 piece->content->video->fade (_film, video.frame),
882 scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
883 _video_container_size,
886 piece->content->video->colour_conversion(),
887 piece->content->video->range(),
/* Emit the frame, repeated as required by the frame-rate change, but never
   past the end of the content. */
895 for (int i = 0; i < frc.repeat; ++i) {
896 if (t < piece->content->end(_film)) {
897 emit_video (_last_video[wp], t);
899 t += one_video_frame ();
/* Handle a block of audio arriving from a decoder: trim anything outside
   the content's period, apply gain, remap to DCP channels, run the audio
   processor if any, then push into the merger.
   NOTE(review): some returns/braces are missing from this extract. */
904 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
906 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
908 shared_ptr<Piece> piece = wp.lock ();
913 shared_ptr<AudioContent> content = piece->content->audio;
914 DCPOMATIC_ASSERT (content);
916 int const rfr = content->resampled_frame_rate (_film);
918 /* Compute time in the DCP */
919 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
920 LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));
922 /* And the end of this block in the DCP */
923 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
925 /* Remove anything that comes before the start or after the end of the content */
926 if (time < piece->content->position()) {
927 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
929 /* This audio is entirely discarded */
932 content_audio.audio = cut.first;
/* NOTE(review): presumably `time = cut.second` here in the full source. */
934 } else if (time > piece->content->end(_film)) {
/* Whole block is after the content's end; drop it. */
937 } else if (end > piece->content->end(_film)) {
/* Block straddles the end; keep only the part inside the content. */
938 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
939 if (remaining_frames == 0) {
942 content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
945 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting (copy first; buffers may be shared). */
949 if (content->gain() != 0) {
950 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
951 gain->apply_gain (content->gain ());
952 content_audio.audio = gain;
/* Remap the stream's channels onto the film's DCP channel layout. */
957 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
961 if (_audio_processor) {
962 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push into the merger and record how far this stream has got, so pass()
   knows when merged audio up to a given time is complete. */
967 _audio_merger.push (content_audio.audio, time);
968 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
969 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handle the start of a bitmap subtitle: apply the content's offset and
   scale settings, scale the image to the container, and register it as an
   active text starting at the corresponding DCP time. */
973 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
975 shared_ptr<Piece> piece = wp.lock ();
976 shared_ptr<const TextContent> text = wc.lock ();
977 if (!piece || !text) {
981 /* Apply content's subtitle offsets */
982 subtitle.sub.rectangle.x += text->x_offset ();
983 subtitle.sub.rectangle.y += text->y_offset ();
985 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
986 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
987 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
989 /* Apply content's subtitle scale */
990 subtitle.sub.rectangle.width *= text->x_scale ();
991 subtitle.sub.rectangle.height *= text->y_scale ();
994 shared_ptr<Image> image = subtitle.sub.image;
996 /* We will scale the subtitle up to fit _video_container_size */
/* Rectangle is proportional; convert to container pixels. */
997 int const width = subtitle.sub.rectangle.width * _video_container_size.width;
998 int const height = subtitle.sub.rectangle.height * _video_container_size.height;
999 if (width == 0 || height == 0) {
1003 dcp::Size scaled_size (width, height);
1004 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
1005 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
1007 _active_texts[text->type()].add_from (wc, ps, from);
/* Handle the start of a string (plain-text) subtitle: apply the content's
   offset/scale settings to each SubtitleString and register the result as
   an active text starting at the corresponding DCP time. */
1011 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
1013 shared_ptr<Piece> piece = wp.lock ();
1014 shared_ptr<const TextContent> text = wc.lock ();
1015 if (!piece || !text) {
1020 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore subtitles which start after the content has finished. */
1022 if (from > piece->content->end(_film)) {
1026 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
1027 s.set_h_position (s.h_position() + text->x_offset ());
1028 s.set_v_position (s.v_position() + text->y_offset ());
1029 float const xs = text->x_scale();
1030 float const ys = text->y_scale();
1031 float size = s.size();
1033 /* Adjust size to express the common part of the scaling;
1034 e.g. if xs = ys = 0.5 we scale size by 2.
1036 if (xs > 1e-5 && ys > 1e-5) {
1037 size *= 1 / min (1 / xs, 1 / ys);
1041 /* Then express aspect ratio changes */
1042 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1043 s.set_aspect_adjust (xs / ys);
/* Stamp the subtitle with its DCP in-time (1000 = editable-frames rate). */
1046 s.set_in (dcp::Time(from.seconds(), 1000));
1047 ps.string.push_back (StringText (s, text->outline_width()));
1048 ps.add_fonts (text->fonts ());
1051 _active_texts[text->type()].add_from (wc, ps, from);
/* Handle the end of a subtitle: close the matching active text at the
   corresponding DCP time and, if the subtitle is not being burnt in, emit
   it via the Text signal. */
1055 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1057 shared_ptr<const TextContent> text = wc.lock ();
/* Ignore stops for texts we never saw start. */
1062 if (!_active_texts[text->type()].have(wc)) {
1066 shared_ptr<Piece> piece = wp.lock ();
1071 DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ignore stops that fall after the content has finished. */
1073 if (dcp_to > piece->content->end(_film)) {
1077 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
/* Only emit as a discrete text if it is not going to be burnt in. */
1079 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1080 if (text->use() && !always && !text->burn()) {
1081 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/* Seek the player to @time.  @accurate true means frame-accurate (decoders
   must produce exactly from @time); false allows nearby-keyframe seeks.
   Clears all pending audio/text state and repositions every piece.
   NOTE(review): the suspended-check return and the accurate/inaccurate
   branch structure are partly missing from this extract. */
1086 Player::seek (DCPTime time, bool accurate)
1088 boost::mutex::scoped_lock lm (_mutex);
1089 LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
1092 /* We can't seek in this state */
/* Drop any part-assembled 3D frames. */
1097 _shuffler->clear ();
1102 if (_audio_processor) {
1103 _audio_processor->flush ();
1106 _audio_merger.clear ();
1107 for (int i = 0; i < TEXT_COUNT; ++i) {
1108 _active_texts[i].clear ();
1111 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1112 if (time < i->content->position()) {
1113 /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
1114 we must seek this (following) content accurately, otherwise when we come to the end of the current
1115 content we may not start right at the beginning of the next, causing a gap (if the next content has
1116 been trimmed to a point between keyframes, or something).
1118 i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
1120 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1121 /* During; seek to position */
1122 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1125 /* After; this piece is done */
/* Accurate: we know exactly where the next emissions will come from. */
1131 _last_video_time = time;
1132 _last_video_eyes = EYES_LEFT;
1133 _last_audio_time = time;
/* Inaccurate: positions are unknown until data actually arrives. */
1135 _last_video_time = optional<DCPTime>();
1136 _last_video_eyes = optional<Eyes>();
1137 _last_audio_time = optional<DCPTime>();
1140 _black.set_position (time);
1141 _silent.set_position (time);
1143 _last_video.clear ();
/* Queue a video frame for emission.  Frames are buffered briefly so that
   subtitles which arrive slightly late can still be attached before the
   frame leaves the player. */
1147 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1149 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1150 player before the video that requires them.
1152 _delay.push_back (make_pair (pv, time));
/* Advance the clock once the right eye (or a 2D frame) has gone past. */
1154 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1155 _last_video_time = time + one_video_frame();
1157 _last_video_eyes = increment_eyes (pv->eyes());
/* Keep up to 3 frames buffered before actually emitting the oldest. */
1159 if (_delay.size() < 3) {
1163 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1165 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: drop expired texts, burn in any open
   subtitles due at this time, and fire the Video signal.
   NOTE(review): the Video(...) emission line is missing from this extract. */
1169 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1171 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1172 for (int i = 0; i < TEXT_COUNT; ++i) {
1173 _active_texts[i].clear_before (time);
1177 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1179 pv->set_text (subtitles.get ());
/* Emit a block of audio via the Audio signal and advance _last_audio_time.
   Audio must be emitted contiguously; a gap or overlap of more than half a
   sample at 48kHz is a programming error. */
1186 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1188 /* Log if the assert below is about to fail */
1189 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1190 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1193 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1194 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1195 Audio (data, time, _film->audio_frame_rate());
1196 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence covering @period, in blocks of at most half a second. */
1200 Player::fill_audio (DCPTimePeriod period)
/* Empty period: nothing to do. */
1202 if (period.from == period.to) {
1206 DCPOMATIC_ASSERT (period.from < period.to);
1208 DCPTime t = period.from;
1209 while (t < period.to) {
1210 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1211 Frame const samples = block.frames_round(_film->audio_frame_rate());
1213 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1214 silence->make_silent ();
1215 emit_audio (silence, t);
/* @return the duration of one video frame at the film's frame rate. */
1222 Player::one_video_frame () const
1224 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Discard the part of @audio before @discard_to.  @return the remaining
   buffers and their new start time; an empty pair if everything was
   discarded. */
1227 pair<shared_ptr<AudioBuffers>, DCPTime>
1228 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1230 DCPTime const discard_time = discard_to - time;
1231 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1232 Frame remaining_frames = audio->frames() - discard_frames;
1233 if (remaining_frames <= 0) {
1234 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1236 shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1237 return make_pair(cut, time + discard_time);
/* Set a J2K decode resolution reduction for DCP content, emitting
   PENDING / CANCELLED / DONE change notifications around the update. */
1241 Player::set_dcp_decode_reduction (optional<int> reduction)
1243 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1246 boost::mutex::scoped_lock lm (_mutex);
/* No-op: cancel the pending notification and return early. */
1248 if (reduction == _dcp_decode_reduction) {
1250 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1254 _dcp_decode_reduction = reduction;
1255 setup_pieces_unlocked ();
1258 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Convert a ContentTime in @content to a DCP time, or nullopt if the
   content is not (or no longer) in our piece list. */
1262 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1264 boost::mutex::scoped_lock lm (_mutex);
1266 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1267 if (i->content == content) {
1268 return content_time_to_dcp (i, t);
1272 /* We couldn't find this content; perhaps things are being changed over */
1273 return optional<DCPTime>();
/* @return the playlist we are using: the explicit one given at
   construction, or the film's own playlist otherwise. */
1277 shared_ptr<const Playlist>
1278 Player::playlist () const
1280 return _playlist ? _playlist : _film->playlist();
/* Forward Atmos data from a decoder, converting its frame index to a DCP
   time at the film's video frame rate. */
1285 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1287 Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);