2 Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
21 #include "atmos_decoder.h"
24 #include "audio_buffers.h"
25 #include "content_audio.h"
26 #include "dcp_content.h"
27 #include "dcpomatic_log.h"
30 #include "raw_image_proxy.h"
33 #include "render_text.h"
35 #include "content_video.h"
36 #include "player_video.h"
37 #include "frame_rate_change.h"
38 #include "audio_processor.h"
40 #include "referenced_reel_asset.h"
41 #include "decoder_factory.h"
43 #include "video_decoder.h"
44 #include "audio_decoder.h"
45 #include "text_content.h"
46 #include "text_decoder.h"
47 #include "ffmpeg_content.h"
48 #include "audio_content.h"
49 #include "dcp_decoder.h"
50 #include "image_decoder.h"
51 #include "compose.hpp"
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <boost/foreach.hpp>
76 using boost::shared_ptr;
77 using boost::weak_ptr;
78 using boost::dynamic_pointer_cast;
79 using boost::optional;
80 using boost::scoped_ptr;
81 using namespace dcpomatic;
/* Unique IDs for player properties, passed with the Change signal so listeners
   can tell what changed.  NOTE(review): values 700+ presumably avoid clashing
   with other property ID ranges used elsewhere — confirm.
*/
83 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
84 int const PlayerProperty::PLAYLIST = 701;
85 int const PlayerProperty::FILM_CONTAINER = 702;
86 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
87 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
88 int const PlayerProperty::PLAYBACK_LENGTH = 705;
/* Construct a Player for the whole of the given Film.
   NOTE(review): this dump is elided — parts of the member-initialiser list and
   the constructor body are missing here; comments annotate only visible code.
*/
90 Player::Player (shared_ptr<const Film> film)
/* All "ignore" flags default to off: the player produces video, audio and text */
93 , _ignore_video (false)
94 , _ignore_audio (false)
95 , _ignore_text (false)
96 , _always_burn_open_subtitles (false)
98 , _tolerant (film->tolerant())
99 , _play_referenced (false)
/* The merger works at the DCP audio rate */
100 , _audio_merger (_film->audio_frame_rate())
/* Construct a Player which plays the given Playlist rather than the Film's own.
   NOTE(review): elided dump — initialiser list and body are incomplete here.
*/
106 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
108 , _playlist (playlist_)
110 , _ignore_video (false)
111 , _ignore_audio (false)
112 , _ignore_text (false)
113 , _always_burn_open_subtitles (false)
115 , _tolerant (film->tolerant())
116 , _play_referenced (false)
117 , _audio_merger (_film->audio_frame_rate())
/* Wire up change notifications from the film and playlist */
126 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
127 /* The butler must hear about this first, so since we are proxying this through to the butler we must
/* at_front so our proxying handler runs before other observers of the playlist */
130 _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
131 _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
132 set_video_container_size (_film->frame_size ());
/* Pick up the film's audio processor (if any) via the normal change path */
134 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
/* Accurate seek to zero puts all decoders in a known state */
137 seek (DCPTime (), true);
146 Player::setup_pieces ()
148 boost::mutex::scoped_lock lm (_mutex);
149 setup_pieces_unlocked ();
154 have_video (shared_ptr<const Content> content)
156 return static_cast<bool>(content->video) && content->video->use();
160 have_audio (shared_ptr<const Content> content)
162 return static_cast<bool>(content->audio);
/* Rebuild _pieces (content + decoder + frame-rate-change) from the playlist,
   re-using old decoders where the content is unchanged, and re-wire all decoder
   signals into the player.  Caller must hold _mutex.
   NOTE(review): elided dump — braces, `continue`s and some lines are missing;
   comments annotate only the visible code.
*/
166 Player::setup_pieces_unlocked ()
168 _playback_length = _playlist ? _playlist->length(_film) : _film->length();
/* Keep the old pieces around so their decoders can be re-used below */
170 list<shared_ptr<Piece> > old_pieces = _pieces;
174 _shuffler = new Shuffler();
175 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
177 BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
179 if (!i->paths_valid ()) {
183 if (_ignore_video && _ignore_audio && i->text.empty()) {
184 /* We're only interested in text and this content has none */
/* Re-use a decoder from the previous set-up for this content if there was one */
188 shared_ptr<Decoder> old_decoder;
189 BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
190 if (j->content == i) {
191 old_decoder = j->decoder;
196 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
197 DCPOMATIC_ASSERT (decoder);
199 FrameRateChange frc (_film, i);
/* Honour the ignore flags by muting the corresponding sub-decoders */
201 if (decoder->video && _ignore_video) {
202 decoder->video->set_ignore (true);
205 if (decoder->audio && _ignore_audio) {
206 decoder->audio->set_ignore (true);
210 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
211 i->set_ignore (true);
/* DCP decoders get extra configuration for referenced (VF) workflows */
215 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
217 dcp->set_decode_referenced (_play_referenced);
218 if (_play_referenced) {
219 dcp->set_forced_reduction (_dcp_decode_reduction);
223 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
224 _pieces.push_back (piece);
226 if (decoder->video) {
227 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
228 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
229 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
231 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
235 if (decoder->audio) {
236 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Connect every text decoder's start/stop signals */
239 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
241 while (j != decoder->text.end()) {
242 (*j)->BitmapStart.connect (
243 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
245 (*j)->PlainStart.connect (
246 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
249 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
255 if (decoder->atmos) {
256 decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
/* Track, per audio stream, which piece it belongs to and where its pushed audio ends */
260 _stream_states.clear ();
261 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
262 if (i->content->audio) {
263 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
264 _stream_states[j] = StreamState (i, i->content->position ());
/* Periods with no video / no audio, which pass() fills with black / silence */
269 _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
270 _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
/* Reset emission bookkeeping to the start */
272 _last_video_time = DCPTime ();
273 _last_video_eyes = EYES_BOTH;
274 _last_audio_time = DCPTime ();
/* Handle a change to a piece of content in the playlist: suspend on PENDING,
   rebuild pieces on DONE, resume on CANCELLED, then forward via Change.
   NOTE(review): elided dump — the suspend/resume/rebuild statements between the
   branches are missing here.
*/
278 Player::playlist_content_change (ChangeType type, int property, bool frequent)
280 if (type == CHANGE_TYPE_PENDING) {
281 /* The player content is probably about to change, so we can't carry on
282 until that has happened and we've rebuilt our pieces. Stop pass()
283 and seek() from working until then.
286 } else if (type == CHANGE_TYPE_DONE) {
287 /* A change in our content has gone through. Re-build our pieces. */
290 } else if (type == CHANGE_TYPE_CANCELLED) {
/* Forward the notification to our own observers */
294 Change (type, property, frequent);
/* Set the size of the "container" into which video is placed, rebuilding the
   cached black frame to match.  Emits PENDING / CANCELLED-or-DONE around the
   change.  NOTE(review): elided dump — braces/returns between branches missing.
*/
298 Player::set_video_container_size (dcp::Size s)
300 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
303 boost::mutex::scoped_lock lm (_mutex);
/* No-op if the size is unchanged; signal CANCELLED rather than DONE */
305 if (s == _video_container_size) {
307 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
311 _video_container_size = s;
/* Cached black frame used for gap filling must match the new size */
313 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
314 _black_image->make_black ();
317 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Playlist-level change: rebuild pieces when the change completes, then
   forward it as a PLAYLIST property change.
   NOTE(review): elided dump — the setup_pieces() call inside the if is missing.
*/
321 Player::playlist_change (ChangeType type)
323 if (type == CHANGE_TYPE_DONE) {
326 Change (type, PlayerProperty::PLAYLIST, false);
/* React to Film property changes that affect our output.
   NOTE(review): elided dump — the setup_pieces() call and some braces missing.
*/
330 Player::film_change (ChangeType type, Film::Property p)
332 /* Here we should notice Film properties that affect our output, and
333 alert listeners that our output now would be different to how it was
334 last time we were run.
337 if (p == Film::CONTAINER) {
338 Change (type, PlayerProperty::FILM_CONTAINER, false);
339 } else if (p == Film::VIDEO_FRAME_RATE) {
340 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
341 so we need new pieces here.
343 if (type == CHANGE_TYPE_DONE) {
346 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
347 } else if (p == Film::AUDIO_PROCESSOR) {
/* Clone the film's processor at our audio rate */
348 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
349 boost::mutex::scoped_lock lm (_mutex);
350 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
352 } else if (p == Film::AUDIO_CHANNELS) {
/* Pending merged audio is laid out for the old channel count; discard it */
353 if (type == CHANGE_TYPE_DONE) {
354 boost::mutex::scoped_lock lm (_mutex);
355 _audio_merger.clear ();
/* @return a PlayerVideo wrapping the cached black image, sized to the video
   container, for the given eye(s).  Used to fill gaps where there is no video.
   NOTE(review): elided dump — several constructor arguments are missing here.
*/
360 shared_ptr<PlayerVideo>
361 Player::black_player_video_frame (Eyes eyes) const
363 return shared_ptr<PlayerVideo> (
365 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
368 _video_container_size,
369 _video_container_size,
372 PresetColourConversion::all().front().conversion,
/* No originating content / frame for synthetic black */
374 boost::weak_ptr<Content>(),
375 boost::optional<Frame>(),
382 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
384 DCPTime s = t - piece->content->position ();
385 s = min (piece->content->length_after_trim(_film), s);
386 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
388 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
389 then convert that ContentTime to frames at the content's rate. However this fails for
390 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
391 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
393 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
395 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
399 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
401 /* See comment in dcp_to_content_video */
402 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
403 return d + piece->content->position();
407 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
409 DCPTime s = t - piece->content->position ();
410 s = min (piece->content->length_after_trim(_film), s);
411 /* See notes in dcp_to_content_video */
412 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
416 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
418 /* See comment in dcp_to_content_video */
419 return DCPTime::from_frames (f, _film->audio_frame_rate())
420 - DCPTime (piece->content->trim_start(), piece->frc)
421 + piece->content->position();
425 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
427 DCPTime s = t - piece->content->position ();
428 s = min (piece->content->length_after_trim(_film), s);
429 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
433 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
435 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
438 list<shared_ptr<Font> >
439 Player::get_subtitle_fonts ()
441 boost::mutex::scoped_lock lm (_mutex);
443 list<shared_ptr<Font> > fonts;
444 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
445 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
446 /* XXX: things may go wrong if there are duplicate font IDs
447 with different font files.
449 list<shared_ptr<Font> > f = j->fonts ();
450 copy (f.begin(), f.end(), back_inserter (fonts));
457 /** Set this player never to produce any video data */
459 Player::set_ignore_video ()
461 boost::mutex::scoped_lock lm (_mutex);
462 _ignore_video = true;
463 setup_pieces_unlocked ();
467 Player::set_ignore_audio ()
469 boost::mutex::scoped_lock lm (_mutex);
470 _ignore_audio = true;
471 setup_pieces_unlocked ();
475 Player::set_ignore_text ()
477 boost::mutex::scoped_lock lm (_mutex);
479 setup_pieces_unlocked ();
482 /** Set the player to always burn open texts into the image regardless of the content settings */
484 Player::set_always_burn_open_subtitles ()
486 boost::mutex::scoped_lock lm (_mutex);
487 _always_burn_open_subtitles = true;
490 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): elided dump — the function header line and the flag assignment
   (presumably `_fast = true;`) are missing from this fragment; only the lock
   and the piece rebuild are visible.  Confirm against the original file. */
494 boost::mutex::scoped_lock lm (_mutex);
496 setup_pieces_unlocked ();
500 Player::set_play_referenced ()
502 boost::mutex::scoped_lock lm (_mutex);
503 _play_referenced = true;
504 setup_pieces_unlocked ();
508 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
510 DCPOMATIC_ASSERT (r);
511 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
512 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
513 if (r->actual_duration() > 0) {
515 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/* Work out which reel assets from DCP content are referenced (rather than
   re-encoded) and return them with their DCP periods, applying the content's
   trims across reel boundaries.
   NOTE(review): elided dump — braces, `continue`s, the try/catch around the
   decoder and the final `return a;` are missing from this fragment.
*/
520 list<ReferencedReelAsset>
521 Player::get_reel_assets ()
523 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
525 list<ReferencedReelAsset> a;
527 BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
/* Only DCP content can be referenced */
528 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
533 scoped_ptr<DCPDecoder> decoder;
535 decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
540 DCPOMATIC_ASSERT (j->video_frame_rate ());
541 double const cfr = j->video_frame_rate().get();
542 Frame const trim_start = j->trim_start().frames_round (cfr);
543 Frame const trim_end = j->trim_end().frames_round (cfr);
544 int const ffr = _film->video_frame_rate ();
546 /* position in the asset from the start */
547 int64_t offset_from_start = 0;
548 /* position in the asset from the end */
549 int64_t offset_from_end = 0;
/* First pass: total duration, so offset_from_end can count down per reel */
550 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
551 /* Assume that main picture duration is the length of the reel */
552 offset_from_end += k->main_picture()->actual_duration();
555 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
557 /* Assume that main picture duration is the length of the reel */
558 int64_t const reel_duration = k->main_picture()->actual_duration();
560 /* See doc/design/trim_reels.svg */
561 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
562 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
564 DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
/* Add each asset type which this DCP content references */
565 if (j->reference_video ()) {
566 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
569 if (j->reference_audio ()) {
570 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
573 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
574 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
577 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
578 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
579 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
583 offset_from_start += reel_duration;
584 offset_from_end -= reel_duration;
/* Body of Player::pass(): make the most-behind decoder (or the black/silence
   filler) emit some data, then flush any audio that is known complete.
   NOTE(review): elided dump — the function signature, the `which` selection
   switch and many braces/returns are missing from this fragment.
*/
594 boost::mutex::scoped_lock lm (_mutex);
597 /* We can't pass in this state */
601 if (_playback_length == DCPTime()) {
602 /* Special; just give one black frame */
603 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
607 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
609 shared_ptr<Piece> earliest_content;
610 optional<DCPTime> earliest_time;
612 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
617 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
618 if (t > i->content->end(_film)) {
622 /* Given two choices at the same time, pick the one with texts so we see it before
625 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
627 earliest_content = i;
641 if (earliest_content) {
/* Black/silence fillers compete with content for "who is farthest behind" */
645 if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
646 earliest_time = _black.position ();
650 if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
651 earliest_time = _silent.position ();
/* CONTENT case: let the winning decoder emit */
658 earliest_content->done = earliest_content->decoder->pass ();
659 shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
660 if (dcp && !_play_referenced && dcp->reference_audio()) {
661 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
662 to `hide' the fact that no audio was emitted during the referenced DCP (though
663 we need to behave as though it was).
665 _last_audio_time = dcp->end (_film);
/* BLACK case: emit one black frame and advance */
670 LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
671 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
672 _black.set_position (_black.position() + one_video_frame());
/* SILENT case: fill up to one frame of silence */
676 DCPTimePeriod period (_silent.period_at_position());
677 if (_last_audio_time) {
678 /* Sometimes the thing that happened last finishes fractionally before
679 or after this silence. Bodge the start time of the silence to fix it.
680 I think this is nothing to worry about since we will just add or
681 remove a little silence at the end of some content.
683 int64_t const error = labs(period.from.get() - _last_audio_time->get());
684 /* Let's not worry about less than a frame at 24fps */
685 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
686 if (error >= too_much_error) {
687 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
689 DCPOMATIC_ASSERT (error < too_much_error);
690 period.from = *_last_audio_time;
692 if (period.duration() > one_video_frame()) {
693 period.to = period.from + one_video_frame();
696 _silent.set_position (period.to);
704 /* Emit any audio that is ready */
706 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
707 of our streams, or the position of the _silent.
709 DCPTime pull_to = _playback_length;
710 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
711 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
712 pull_to = i->second.last_push_end;
715 if (!_silent.done() && _silent.position() < pull_to) {
716 pull_to = _silent.position();
719 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
720 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
721 if (_last_audio_time && i->second < *_last_audio_time) {
722 /* This new data comes before the last we emitted (or the last seek); discard it */
723 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
728 } else if (_last_audio_time && i->second > *_last_audio_time) {
729 /* There's a gap between this data and the last we emitted; fill with silence */
730 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
733 emit_audio (i->first, i->second);
/* Flush the video-delay queue once playback is complete */
738 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
739 do_emit_video(i->first, i->second);
/* NOTE(review): elided dump — the BOOST_FOREACH over the active texts and the
   PositionImage construction around lines 368-369 are missing here. */
746 /** @return Open subtitles for the frame at the given time, converted to images */
747 optional<PositionImage>
748 Player::open_subtitles_for_frame (DCPTime time) const
750 list<PositionImage> captions;
751 int const vfr = _film->video_frame_rate();
/* Collect all texts active during this one-frame period that must be burnt in */
755 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
758 /* Bitmap subtitles */
759 BOOST_FOREACH (BitmapText i, j.bitmap) {
764 /* i.image will already have been scaled to fit _video_container_size */
765 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
/* Position is the rectangle origin scaled into the container */
771 lrint (_video_container_size.width * i.rectangle.x),
772 lrint (_video_container_size.height * i.rectangle.y)
778 /* String subtitles (rendered to an image) */
779 if (!j.string.empty ()) {
780 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
781 copy (s.begin(), s.end(), back_inserter (captions));
785 if (captions.empty ()) {
786 return optional<PositionImage> ();
/* Merge all caption images into a single positioned image */
789 return merge (captions);
/* Receive one decoded video frame from a decoder: discard out-of-range frames,
   fill any gap since the last emitted frame (with the previous frame or black,
   2D or 3D as appropriate), build the PlayerVideo, then emit it (repeated for
   frame-rate conversion).
   NOTE(review): elided dump — early `return`s, several braces and some of the
   PlayerVideo constructor arguments are missing from this fragment.
*/
793 Player::video (weak_ptr<Piece> wp, ContentVideo video)
795 shared_ptr<Piece> piece = wp.lock ();
799 if (!piece->content->video->use()) {
804 FrameRateChange frc (_film, piece->content);
/* Skipped frame-rate conversion: drop every other frame */
805 if (frc.skip && (video.frame % 2) == 1) {
809 /* Time of the first frame we will emit */
810 DCPTime const time = content_video_to_dcp (piece, video.frame);
812 /* Discard if it's before the content's period or the last accurate seek.  We can't discard
813 if it's after the content's period here as in that case we still need to fill any gap between
814 `now' and the end of the content's period.
816 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
820 /* Fill gaps that we discover now that we have some video which needs to be emitted.
821 This is where we need to fill to.
823 DCPTime fill_to = min (time, piece->content->end(_film));
825 if (_last_video_time) {
826 DCPTime fill_from = max (*_last_video_time, piece->content->position());
828 /* Fill if we have more than half a frame to do */
829 if ((fill_to - fill_from) > one_video_frame() / 2) {
830 LastVideoMap::const_iterator last = _last_video.find (wp);
831 if (_film->three_d()) {
832 Eyes fill_to_eyes = video.eyes;
833 if (fill_to_eyes == EYES_BOTH) {
834 fill_to_eyes = EYES_LEFT;
836 if (fill_to == piece->content->end(_film)) {
837 /* Don't fill after the end of the content */
838 fill_to_eyes = EYES_LEFT;
840 DCPTime j = fill_from;
841 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
842 if (eyes == EYES_BOTH) {
/* Step eye-by-eye (L then R) through the gap */
845 while (j < fill_to || eyes != fill_to_eyes) {
846 if (last != _last_video.end()) {
847 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
848 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
849 copy->set_eyes (eyes);
850 emit_video (copy, j);
852 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
853 emit_video (black_player_video_frame(eyes), j);
/* Time advances only after the right eye of each pair */
855 if (eyes == EYES_RIGHT) {
856 j += one_video_frame();
858 eyes = increment_eyes (eyes);
/* 2D: fill one whole frame at a time */
861 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
862 if (last != _last_video.end()) {
863 emit_video (last->second, j);
865 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build this frame's PlayerVideo and remember it for future gap-filling */
872 _last_video[wp].reset (
875 piece->content->video->crop (),
876 piece->content->video->fade (_film, video.frame),
877 scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
878 _video_container_size,
881 piece->content->video->colour_conversion(),
882 piece->content->video->range(),
/* Repeated frame-rate conversion: emit the same frame frc.repeat times */
890 for (int i = 0; i < frc.repeat; ++i) {
891 if (t < piece->content->end(_film)) {
892 emit_video (_last_video[wp], t);
894 t += one_video_frame ();
/* Receive one block of decoded audio: trim it to the content's DCP period,
   apply gain, remap to the film's channel layout, run the audio processor, then
   push it into the merger and record where this stream's audio now ends.
   NOTE(review): elided dump — early `return`s and several braces are missing.
*/
899 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
901 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
903 shared_ptr<Piece> piece = wp.lock ();
908 shared_ptr<AudioContent> content = piece->content->audio;
909 DCPOMATIC_ASSERT (content);
911 int const rfr = content->resampled_frame_rate (_film);
913 /* Compute time in the DCP */
914 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
915 /* And the end of this block in the DCP */
916 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
918 /* Remove anything that comes before the start or after the end of the content */
919 if (time < piece->content->position()) {
920 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
922 /* This audio is entirely discarded */
925 content_audio.audio = cut.first;
927 } else if (time > piece->content->end(_film)) {
930 } else if (end > piece->content->end(_film)) {
/* Block straddles the end of the content: keep only the leading part */
931 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
932 if (remaining_frames == 0) {
935 content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
938 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Gain (copy first so we don't modify the decoder's buffers) */
942 if (content->gain() != 0) {
943 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
944 gain->apply_gain (content->gain ());
945 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout */
950 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
954 if (_audio_processor) {
955 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push into the merger and note where this stream's pushed audio now ends */
960 _audio_merger.push (content_audio.audio, time);
961 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
962 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Receive the start of a bitmap subtitle/caption: apply the content's offset
   and scale settings, scale the image to the container, then register it with
   the active-texts tracker.
   NOTE(review): elided dump — `return`s, the PlayerText declaration (`ps`) and
   some braces are missing from this fragment.
*/
966 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
968 shared_ptr<Piece> piece = wp.lock ();
969 shared_ptr<const TextContent> text = wc.lock ();
970 if (!piece || !text) {
974 /* Apply content's subtitle offsets */
975 subtitle.sub.rectangle.x += text->x_offset ();
976 subtitle.sub.rectangle.y += text->y_offset ();
978 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
979 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
980 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
982 /* Apply content's subtitle scale */
983 subtitle.sub.rectangle.width *= text->x_scale ();
984 subtitle.sub.rectangle.height *= text->y_scale ();
987 shared_ptr<Image> image = subtitle.sub.image;
989 /* We will scale the subtitle up to fit _video_container_size */
990 int const width = subtitle.sub.rectangle.width * _video_container_size.width;
991 int const height = subtitle.sub.rectangle.height * _video_container_size.height;
/* Degenerate rectangle: nothing to draw */
992 if (width == 0 || height == 0) {
996 dcp::Size scaled_size (width, height);
997 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
998 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
1000 _active_texts[text->type()].add_from (wc, ps, from);
/* Receive the start of a string (plain-text) subtitle/caption: apply offsets,
   express the content's x/y scales via size and aspect adjust, then register
   it with the active-texts tracker.
   NOTE(review): elided dump — `return`s, the PlayerText declaration (`ps`) and
   some braces are missing from this fragment.
*/
1004 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
1006 shared_ptr<Piece> piece = wp.lock ();
1007 shared_ptr<const TextContent> text = wc.lock ();
1008 if (!piece || !text) {
1013 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Starts after the content has finished: ignore */
1015 if (from > piece->content->end(_film)) {
1019 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
1020 s.set_h_position (s.h_position() + text->x_offset ());
1021 s.set_v_position (s.v_position() + text->y_offset ());
1022 float const xs = text->x_scale();
1023 float const ys = text->y_scale();
1024 float size = s.size();
1026 /* Adjust size to express the common part of the scaling;
1027 e.g. if xs = ys = 0.5 we scale size by 2.
1029 if (xs > 1e-5 && ys > 1e-5) {
1030 size *= 1 / min (1 / xs, 1 / ys);
1034 /* Then express aspect ratio changes */
1035 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1036 s.set_aspect_adjust (xs / ys);
/* Subtitle timing is expressed with 1000 "editable units" per second */
1039 s.set_in (dcp::Time(from.seconds(), 1000));
1040 ps.string.push_back (StringText (s, text->outline_width()));
1041 ps.add_fonts (text->fonts ());
1044 _active_texts[text->type()].add_from (wc, ps, from);
/* Receive the end time of a subtitle/caption: close it in the active-texts
   tracker and, if it is not being burnt in, emit it via the Text signal.
   NOTE(review): elided dump — `return`s and some braces are missing here.
*/
1048 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1050 shared_ptr<const TextContent> text = wc.lock ();
/* Ignore stops for texts we never saw start */
1055 if (!_active_texts[text->type()].have(wc)) {
1059 shared_ptr<Piece> piece = wp.lock ();
1064 DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ends after the content has finished: ignore */
1066 if (dcp_to > piece->content->end(_film)) {
1070 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1072 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
/* Burnt-in texts are not emitted separately; they go into the video image */
1073 if (text->use() && !always && !text->burn()) {
1074 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/* Seek the whole player to the given DCP time, resetting all transient state
   (shuffler, processor, merger, active texts) and seeking every piece's decoder.
   @param accurate true to get frames exactly at `time' (slower).
   NOTE(review): elided dump — the suspended-state check's body, `done` flag
   updates and several braces are missing from this fragment.
*/
1079 Player::seek (DCPTime time, bool accurate)
1081 boost::mutex::scoped_lock lm (_mutex);
1084 /* We can't seek in this state */
/* Drop any partially-assembled 3D frame pairs */
1089 _shuffler->clear ();
1094 if (_audio_processor) {
1095 _audio_processor->flush ();
1098 _audio_merger.clear ();
1099 for (int i = 0; i < TEXT_COUNT; ++i) {
1100 _active_texts[i].clear ();
1103 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1104 if (time < i->content->position()) {
1105 /* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
1106 we must seek this (following) content accurately, otherwise when we come to the end of the current
1107 content we may not start right at the beginning of the next, causing a gap (if the next content has
1108 been trimmed to a point between keyframes, or something).
1110 i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
1112 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1113 /* During; seek to position */
1114 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1117 /* After; this piece is done */
/* Accurate seek: next emissions happen exactly at `time' */
1123 _last_video_time = time;
1124 _last_video_eyes = EYES_LEFT;
1125 _last_audio_time = time;
/* Inaccurate seek: we don't know what will come out first, so don't constrain it */
1127 _last_video_time = optional<DCPTime>();
1128 _last_video_eyes = optional<Eyes>();
1129 _last_audio_time = optional<DCPTime>();
1132 _black.set_position (time);
1133 _silent.set_position (time);
1135 _last_video.clear ();
/* Queue a video frame for emission, delayed by a few frames so subtitles have
   time to arrive, and update the last-emitted-time/eyes bookkeeping.
   NOTE(review): elided dump — braces, an early `return` and the `_delay.pop_front()`
   before do_emit_video are missing from this fragment.
*/
1139 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1141 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1142 player before the video that requires them.
1144 _delay.push_back (make_pair (pv, time));
/* Only advance the clock once both eyes (or the single 2D frame) are queued */
1146 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1147 _last_video_time = time + one_video_frame();
1149 _last_video_eyes = increment_eyes (pv->eyes());
/* Keep up to 3 frames buffered before actually emitting */
1151 if (_delay.size() < 3) {
1155 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1157 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: expire old active texts, attach any open
   subtitles to the frame.
   NOTE(review): elided dump — braces and the final Video() signal emission are
   missing from this fragment.
*/
1161 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
/* Expire texts once per complete frame (not per eye) */
1163 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1164 for (int i = 0; i < TEXT_COUNT; ++i) {
1165 _active_texts[i].clear_before (time);
1169 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1171 pv->set_text (subtitles.get ());
1178 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1180 /* Log if the assert below is about to fail */
1181 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1182 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1185 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1186 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1187 Audio (data, time, _film->audio_frame_rate());
1188 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1192 Player::fill_audio (DCPTimePeriod period)
1194 if (period.from == period.to) {
1198 DCPOMATIC_ASSERT (period.from < period.to);
1200 DCPTime t = period.from;
1201 while (t < period.to) {
1202 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1203 Frame const samples = block.frames_round(_film->audio_frame_rate());
1205 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1206 silence->make_silent ();
1207 emit_audio (silence, t);
1214 Player::one_video_frame () const
1216 return DCPTime::from_frames (1, _film->video_frame_rate ());
1219 pair<shared_ptr<AudioBuffers>, DCPTime>
1220 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1222 DCPTime const discard_time = discard_to - time;
1223 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1224 Frame remaining_frames = audio->frames() - discard_frames;
1225 if (remaining_frames <= 0) {
1226 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1228 shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1229 return make_pair(cut, time + discard_time);
/* Set the JPEG2000 decode reduction used for DCP content, rebuilding pieces if
   it actually changed.  Emits PENDING / CANCELLED-or-DONE around the change.
   NOTE(review): elided dump — the early `return` after CANCELLED and the lock
   scoping braces/unlock before DONE are missing from this fragment.
*/
1233 Player::set_dcp_decode_reduction (optional<int> reduction)
1235 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1238 boost::mutex::scoped_lock lm (_mutex);
/* No-op: signal CANCELLED rather than DONE */
1240 if (reduction == _dcp_decode_reduction) {
1242 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1246 _dcp_decode_reduction = reduction;
1247 setup_pieces_unlocked ();
1250 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1254 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1256 boost::mutex::scoped_lock lm (_mutex);
1258 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1259 if (i->content == content) {
1260 return content_time_to_dcp (i, t);
1264 /* We couldn't find this content; perhaps things are being changed over */
1265 return optional<DCPTime>();
1269 shared_ptr<const Playlist>
1270 Player::playlist () const
1272 return _playlist ? _playlist : _film->playlist();
1277 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1279 Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);