2 Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <dcp/reel_closed_caption_asset.h>
57 #include <boost/foreach.hpp>
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79 using namespace dcpomatic;
/* Property codes passed with the Change signal so that listeners can tell
   which aspect of the Player's output has (or is about to be) changed. */
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
86 int const PlayerProperty::PLAYBACK_LENGTH = 705;
/* NOTE(review): this copy of the file has lines elided (original line numbers
   are non-contiguous), so some initializers/statements are missing from view.
   Code left byte-identical; only comments added. */

/** Construct a Player for the whole of @p film's own playlist. */
88 Player::Player (shared_ptr<const Film> film)
91 , _ignore_video (false)
92 , _ignore_audio (false)
93 , _ignore_text (false)
94 , _always_burn_open_subtitles (false)
96 , _tolerant (film->tolerant())
97 , _play_referenced (false)
98 , _audio_merger (_film->audio_frame_rate())

/** Construct a Player for @p film but playing an explicit @p playlist_
    (which may differ from the film's own playlist). */
104 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
106 , _playlist (playlist_)
108 , _ignore_video (false)
109 , _ignore_audio (false)
110 , _ignore_text (false)
111 , _always_burn_open_subtitles (false)
113 , _tolerant (film->tolerant())
114 , _play_referenced (false)
115 , _audio_merger (_film->audio_frame_rate())

/* Shared construction: wire up change signals, size the video container
   to the film's frame size, pick up any audio processor, then seek to 0. */
124 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
125 /* The butler must hear about this first, so since we are proxying this through to the butler we must
128 _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
129 _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
130 set_video_container_size (_film->frame_size ());
/* Pretend the audio processor has just been set up so _audio_processor is created if needed */
132 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
/* Accurate seek to the start so decoding begins from a known position */
135 seek (DCPTime (), true);
/** Take the state mutex and rebuild the Piece list (locking wrapper
    around setup_pieces_unlocked()). */
144 Player::setup_pieces ()
146 boost::mutex::scoped_lock lm (_mutex);
147 setup_pieces_unlocked ();

/* Free-standing predicate: does this content have a video part?
   Used to build the _black Empty below. */
152 have_video (shared_ptr<const Content> content)
154 return static_cast<bool>(content->video);

/* Free-standing predicate: does this content have an audio part?
   Used to build the _silent Empty below. */
158 have_audio (shared_ptr<const Content> content)
160 return static_cast<bool>(content->audio);
/** Rebuild _pieces (content + decoder + frame-rate-change triples) from the
    current playlist, reconnect decoder signals, and reset playback state.
    Caller must hold _mutex. */
164 Player::setup_pieces_unlocked ()
166 _playback_length = _playlist ? _playlist->length(_film) : _film->length();
/* Keep the old pieces so decoders can be re-used where the content is unchanged */
168 list<shared_ptr<Piece> > old_pieces = _pieces;
172 _shuffler = new Shuffler();
173 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
175 BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
/* Skip content whose files have gone missing */
177 if (!i->paths_valid ()) {
181 if (_ignore_video && _ignore_audio && i->text.empty()) {
182 /* We're only interested in text and this content has none */
/* Re-use the decoder from the previous set of pieces if this content already had one */
186 shared_ptr<Decoder> old_decoder;
187 BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
188 if (j->content == i) {
189 old_decoder = j->decoder;
194 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
195 FrameRateChange frc (_film, i);
198 /* Not something that we can decode; e.g. Atmos content */
/* Honour the ignore flags by silencing the corresponding decoder parts */
202 if (decoder->video && _ignore_video) {
203 decoder->video->set_ignore (true);
206 if (decoder->audio && _ignore_audio) {
207 decoder->audio->set_ignore (true);
211 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
212 i->set_ignore (true);
/* DCP content may itself reference other assets; configure pass-through decode */
216 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
218 dcp->set_decode_referenced (_play_referenced);
219 if (_play_referenced) {
220 dcp->set_forced_reduction (_dcp_decode_reduction);
224 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
225 _pieces.push_back (piece);
/* Route decoder output back into this Player (via the Shuffler for 3D L/R) */
227 if (decoder->video) {
228 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
229 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
230 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
232 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
236 if (decoder->audio) {
237 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Text decoders emit bitmap starts, plain-text starts and stops separately */
240 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
242 while (j != decoder->text.end()) {
243 (*j)->BitmapStart.connect (
244 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
246 (*j)->PlainStart.connect (
247 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
250 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
/* Rebuild the per-audio-stream bookkeeping used by pass() to decide how far audio can be pulled */
257 _stream_states.clear ();
258 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
259 if (i->content->audio) {
260 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
261 _stream_states[j] = StreamState (i, i->content->position ());
/* _black/_silent describe the gaps in the playlist where we must emit
   black video / silent audio ourselves */
266 _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
267 _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
/* Reset the emission clocks to the start */
269 _last_video_time = DCPTime ();
270 _last_video_eyes = EYES_BOTH;
271 _last_audio_time = DCPTime ();
/** Handler for ContentChange from the playlist: suspend on PENDING,
    rebuild pieces on DONE, resume on CANCELLED, then re-emit to our
    own listeners. */
275 Player::playlist_content_change (ChangeType type, int property, bool frequent)
277 if (type == CHANGE_TYPE_PENDING) {
278 /* The player content is probably about to change, so we can't carry on
279 until that has happened and we've rebuilt our pieces. Stop pass()
280 and seek() from working until then.
283 } else if (type == CHANGE_TYPE_DONE) {
284 /* A change in our content has gone through. Re-build our pieces. */
287 } else if (type == CHANGE_TYPE_CANCELLED) {
/* Proxy the change on to anybody listening to this Player */
291 Change (type, property, frequent);
/** Set the size of the "container" into which all video output is placed,
    recreating the cached black frame to match.  Emits the usual
    PENDING / CANCELLED-or-DONE Change pair around the update. */
295 Player::set_video_container_size (dcp::Size s)
297 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
300 boost::mutex::scoped_lock lm (_mutex);
/* No-op: cancel the pending change rather than announcing DONE */
302 if (s == _video_container_size) {
304 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
308 _video_container_size = s;
/* Pre-render a black frame at the new size for use in gaps */
310 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
311 _black_image->make_black ();
314 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/** Handler for Change from the playlist itself: rebuild pieces when a
    change completes, then notify our listeners. */
318 Player::playlist_change (ChangeType type)
320 if (type == CHANGE_TYPE_DONE) {
323 Change (type, PlayerProperty::PLAYLIST, false);
/** Handler for property changes on the Film, translating them into the
    Player's own Change notifications and updating internal state. */
327 Player::film_change (ChangeType type, Film::Property p)
329 /* Here we should notice Film properties that affect our output, and
330 alert listeners that our output now would be different to how it was
331 last time we were run.
334 if (p == Film::CONTAINER) {
335 Change (type, PlayerProperty::FILM_CONTAINER, false);
336 } else if (p == Film::VIDEO_FRAME_RATE) {
337 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
338 so we need new pieces here.
340 if (type == CHANGE_TYPE_DONE) {
343 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
344 } else if (p == Film::AUDIO_PROCESSOR) {
/* Clone the film's processor so we have our own instance at our sample rate */
345 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
346 boost::mutex::scoped_lock lm (_mutex);
347 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
349 } else if (p == Film::AUDIO_CHANNELS) {
/* Channel count changed; any merged-but-unemitted audio is now invalid */
350 if (type == CHANGE_TYPE_DONE) {
351 boost::mutex::scoped_lock lm (_mutex);
352 _audio_merger.clear ();
/** @return A PlayerVideo wrapping the cached black image, sized to the
    video container, for the given @p eyes — used to fill gaps in video. */
357 shared_ptr<PlayerVideo>
358 Player::black_player_video_frame (Eyes eyes) const
360 return shared_ptr<PlayerVideo> (
362 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
365 _video_container_size,
366 _video_container_size,
369 PresetColourConversion::all().front().conversion,
/* No originating content/frame for a synthesised black frame */
371 boost::weak_ptr<Content>(),
372 boost::optional<Frame>(),
/** Convert a DCP timeline time to a video frame index within @p piece's
    content, accounting for position, trim and frame-rate skip/repeat. */
379 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
381 DCPTime s = t - piece->content->position ();
382 s = min (piece->content->length_after_trim(_film), s);
383 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
385 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
386 then convert that ContentTime to frames at the content's rate. However this fails for
387 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
388 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
390 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
392 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();

/** Inverse of dcp_to_content_video(): content video frame -> DCP time. */
396 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
398 /* See comment in dcp_to_content_video */
399 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
400 return d + piece->content->position();

/** Convert a DCP timeline time to a resampled-audio frame index within @p piece. */
404 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
406 DCPTime s = t - piece->content->position ();
407 s = min (piece->content->length_after_trim(_film), s);
408 /* See notes in dcp_to_content_video */
409 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());

/** Inverse of dcp_to_resampled_audio(): resampled audio frame -> DCP time. */
413 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
415 /* See comment in dcp_to_content_video */
416 return DCPTime::from_frames (f, _film->audio_frame_rate())
417 - DCPTime (piece->content->trim_start(), piece->frc)
418 + piece->content->position();

/** Convert a DCP timeline time to a ContentTime within @p piece. */
422 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
424 DCPTime s = t - piece->content->position ();
425 s = min (piece->content->length_after_trim(_film), s);
426 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());

/** Inverse of dcp_to_content_time(): ContentTime within @p piece -> DCP time. */
430 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
432 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/** Collect the fonts declared by every text content in the playlist.
    @return Concatenated font list (duplicates are not removed). */
435 list<shared_ptr<Font> >
436 Player::get_subtitle_fonts ()
438 boost::mutex::scoped_lock lm (_mutex);
440 list<shared_ptr<Font> > fonts;
441 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
442 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
443 /* XXX: things may go wrong if there are duplicate font IDs
444 with different font files.
446 list<shared_ptr<Font> > f = j->fonts ();
447 copy (f.begin(), f.end(), back_inserter (fonts));
454 /** Set this player never to produce any video data */
456 Player::set_ignore_video ()
458 boost::mutex::scoped_lock lm (_mutex);
459 _ignore_video = true;
/* Pieces must be rebuilt so decoders pick up the new ignore flag */
460 setup_pieces_unlocked ();

/** Set this player never to produce any audio data */
464 Player::set_ignore_audio ()
466 boost::mutex::scoped_lock lm (_mutex);
467 _ignore_audio = true;
468 setup_pieces_unlocked ();

/** Set this player never to produce any text (subtitle/caption) data */
472 Player::set_ignore_text ()
474 boost::mutex::scoped_lock lm (_mutex);
476 setup_pieces_unlocked ();

479 /** Set the player to always burn open texts into the image regardless of the content settings */
481 Player::set_always_burn_open_subtitles ()
483 boost::mutex::scoped_lock lm (_mutex);
484 _always_burn_open_subtitles = true;

487 /** Sets up the player to be faster, possibly at the expense of quality */
491 boost::mutex::scoped_lock lm (_mutex);
493 setup_pieces_unlocked ();

/** Set the player to decode referenced DCP content rather than skipping it */
497 Player::set_play_referenced ()
499 boost::mutex::scoped_lock lm (_mutex);
500 _play_referenced = true;
501 setup_pieces_unlocked ();
/** Apply reel-level trims to @p r and, if anything remains, append it to @p a
    as a ReferencedReelAsset covering the corresponding DCP period.
    @param reel_trim_start  frames to trim from the start of this reel
    @param reel_trim_end    frames to trim from the end of this reel
    @param from             DCP time at which this reel starts
    @param ffr              film video frame rate */
505 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
507 DCPOMATIC_ASSERT (r);
508 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
509 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
/* Only keep the asset if the trims left a non-empty duration */
510 if (r->actual_duration() > 0) {
512 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/** @return Reel assets from DCP content in the playlist that are marked to be
    referenced (passed through) rather than re-encoded, with per-reel trims applied. */
517 list<ReferencedReelAsset>
518 Player::get_reel_assets ()
520 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
522 list<ReferencedReelAsset> a;
524 BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
/* Only DCP content can be referenced */
525 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
530 scoped_ptr<DCPDecoder> decoder;
532 decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
537 DCPOMATIC_ASSERT (j->video_frame_rate ());
538 double const cfr = j->video_frame_rate().get();
/* Content trims expressed in content-rate frames */
539 Frame const trim_start = j->trim_start().frames_round (cfr);
540 Frame const trim_end = j->trim_end().frames_round (cfr);
541 int const ffr = _film->video_frame_rate ();
543 /* position in the asset from the start */
544 int64_t offset_from_start = 0;
545 /* position in the asset from the end */
546 int64_t offset_from_end = 0;
/* First pass: total duration, so offset_from_end can count down */
547 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
548 /* Assume that main picture duration is the length of the reel */
549 offset_from_end += k->main_picture()->actual_duration();
/* Second pass: work out per-reel trims and collect the referenced assets */
552 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
554 /* Assume that main picture duration is the length of the reel */
555 int64_t const reel_duration = k->main_picture()->actual_duration();
557 /* See doc/design/trim_reels.svg */
558 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
559 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
561 DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
562 if (j->reference_video ()) {
563 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
566 if (j->reference_audio ()) {
567 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
570 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
571 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
574 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
575 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
576 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
580 offset_from_start += reel_duration;
581 offset_from_end -= reel_duration;
/* pass(): advance playback by making the decoder (or black/silent filler)
   that is furthest behind emit some data, then flush any audio that is
   known to be complete and any video whose subtitle-delay has elapsed. */
591 boost::mutex::scoped_lock lm (_mutex);
594 /* We can't pass in this state */
/* Zero-length playback: emit a single black frame and stop */
598 if (_playback_length == DCPTime()) {
599 /* Special; just give one black frame */
600 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
604 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
606 shared_ptr<Piece> earliest_content;
607 optional<DCPTime> earliest_time;
609 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Current decode position of this piece on the DCP timeline */
614 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
615 if (t > i->content->end(_film)) {
619 /* Given two choices at the same time, pick the one with texts so we see it before
622 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
624 earliest_content = i;
638 if (earliest_content) {
/* Black/silent filler may be even further behind than any content */
642 if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
643 earliest_time = _black.position ();
647 if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
648 earliest_time = _silent.position ();
/* Whichever was earliest: pump content, or emit black/silence */
655 earliest_content->done = earliest_content->decoder->pass ();
656 shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
657 if (dcp && !_play_referenced && dcp->reference_audio()) {
658 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
659 to `hide' the fact that no audio was emitted during the referenced DCP (though
660 we need to behave as though it was).
662 _last_audio_time = dcp->end (_film);
667 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
668 _black.set_position (_black.position() + one_video_frame());
672 DCPTimePeriod period (_silent.period_at_position());
673 if (_last_audio_time) {
674 /* Sometimes the thing that happened last finishes fractionally before
675 or after this silence. Bodge the start time of the silence to fix it.
676 I think this is nothing to worry about since we will just add or
677 remove a little silence at the end of some content.
679 int64_t const error = labs(period.from.get() - _last_audio_time->get());
680 /* Let's not worry about less than a frame at 24fps */
681 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
682 if (error >= too_much_error) {
683 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
685 DCPOMATIC_ASSERT (error < too_much_error);
686 period.from = *_last_audio_time;
/* Emit silence in at-most-one-video-frame chunks */
688 if (period.duration() > one_video_frame()) {
689 period.to = period.from + one_video_frame();
692 _silent.set_position (period.to);
700 /* Emit any audio that is ready */
702 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
703 of our streams, or the position of the _silent.
705 DCPTime pull_to = _playback_length;
706 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
707 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
708 pull_to = i->second.last_push_end;
711 if (!_silent.done() && _silent.position() < pull_to) {
712 pull_to = _silent.position();
715 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
716 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
717 if (_last_audio_time && i->second < *_last_audio_time) {
718 /* This new data comes before the last we emitted (or the last seek); discard it */
719 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
724 } else if (_last_audio_time && i->second > *_last_audio_time) {
725 /* There's a gap between this data and the last we emitted; fill with silence */
726 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
729 emit_audio (i->first, i->second);
/* Flush delayed video whose subtitle wiggle-room has passed */
734 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
735 do_emit_video(i->first, i->second);
742 /** @return Open subtitles for the frame at the given time, converted to images */
743 optional<PositionImage>
744 Player::open_subtitles_for_frame (DCPTime time) const
746 list<PositionImage> captions;
747 int const vfr = _film->video_frame_rate();
/* Get all open subtitles active during this one-frame period (including
   ones forced on by _always_burn_open_subtitles) */
751 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
754 /* Bitmap subtitles */
755 BOOST_FOREACH (BitmapText i, j.bitmap) {
760 /* i.image will already have been scaled to fit _video_container_size */
761 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
/* Rectangle coordinates are fractions of the container; convert to pixels */
767 lrint (_video_container_size.width * i.rectangle.x),
768 lrint (_video_container_size.height * i.rectangle.y)
774 /* String subtitles (rendered to an image) */
775 if (!j.string.empty ()) {
776 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
777 copy (s.begin(), s.end(), back_inserter (captions));
781 if (captions.empty ()) {
782 return optional<PositionImage> ();
/* Composite all captions into a single positioned image */
785 return merge (captions);
/** Handler for video data arriving from a piece's decoder (possibly via the
    Shuffler).  Converts to DCP time, fills any gap since the last emitted
    frame (repeating the previous frame or using black), then emits. */
789 Player::video (weak_ptr<Piece> wp, ContentVideo video)
791 shared_ptr<Piece> piece = wp.lock ();
796 FrameRateChange frc (_film, piece->content);
/* When skipping (content faster than DCP) drop every other frame */
797 if (frc.skip && (video.frame % 2) == 1) {
801 /* Time of the first frame we will emit */
802 DCPTime const time = content_video_to_dcp (piece, video.frame);
804 /* Discard if it's before the content's period or the last accurate seek. We can't discard
805 if it's after the content's period here as in that case we still need to fill any gap between
806 `now' and the end of the content's period.
808 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
812 /* Fill gaps that we discover now that we have some video which needs to be emitted.
813 This is where we need to fill to.
815 DCPTime fill_to = min (time, piece->content->end(_film));
817 if (_last_video_time) {
818 DCPTime fill_from = max (*_last_video_time, piece->content->position());
820 /* Fill if we have more than half a frame to do */
821 if ((fill_to - fill_from) > one_video_frame() / 2) {
822 LastVideoMap::const_iterator last = _last_video.find (wp);
/* 3D: fill eye-by-eye so L/R alternation stays correct */
823 if (_film->three_d()) {
824 Eyes fill_to_eyes = video.eyes;
825 if (fill_to_eyes == EYES_BOTH) {
826 fill_to_eyes = EYES_LEFT;
828 if (fill_to == piece->content->end(_film)) {
829 /* Don't fill after the end of the content */
830 fill_to_eyes = EYES_LEFT;
832 DCPTime j = fill_from;
833 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
834 if (eyes == EYES_BOTH) {
837 while (j < fill_to || eyes != fill_to_eyes) {
/* Repeat the last-seen frame if we have one, else emit black */
838 if (last != _last_video.end()) {
839 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
840 copy->set_eyes (eyes);
841 emit_video (copy, j);
843 emit_video (black_player_video_frame(eyes), j);
845 if (eyes == EYES_RIGHT) {
846 j += one_video_frame();
848 eyes = increment_eyes (eyes);
/* 2D: simple frame-by-frame fill */
851 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
852 if (last != _last_video.end()) {
853 emit_video (last->second, j);
855 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this frame, applying crop/fade/scale/colour */
862 _last_video[wp].reset (
865 piece->content->video->crop (),
866 piece->content->video->fade (_film, video.frame),
867 piece->content->video->scale().size (
868 piece->content->video, _video_container_size, _film->frame_size ()
870 _video_container_size,
873 piece->content->video->colour_conversion(),
874 piece->content->video->range(),
/* Emit the frame, repeating as required by the frame-rate change,
   but never past the end of the content */
882 for (int i = 0; i < frc.repeat; ++i) {
883 if (t < piece->content->end(_film)) {
884 emit_video (_last_video[wp], t);
886 t += one_video_frame ();
/** Handler for audio data arriving from a piece's decoder.  Trims the block
    to the content's DCP period, applies gain/remap/processing, and pushes it
    into the merger for later emission by pass(). */
891 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
893 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
895 shared_ptr<Piece> piece = wp.lock ();
900 shared_ptr<AudioContent> content = piece->content->audio;
901 DCPOMATIC_ASSERT (content);
903 int const rfr = content->resampled_frame_rate (_film);
905 /* Compute time in the DCP */
906 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
907 /* And the end of this block in the DCP */
908 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
910 /* Remove anything that comes before the start or after the end of the content */
911 if (time < piece->content->position()) {
912 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
914 /* This audio is entirely discarded */
917 content_audio.audio = cut.first;
919 } else if (time > piece->content->end(_film)) {
/* Block starts after the content's end: drop it all */
922 } else if (end > piece->content->end(_film)) {
/* Block straddles the content's end: keep only the leading frames */
923 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
924 if (remaining_frames == 0) {
927 content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
930 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting, on a copy so the decoder's buffer is untouched */
934 if (content->gain() != 0) {
935 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
936 gain->apply_gain (content->gain ());
937 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout */
942 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
/* Run through the film's audio processor, if any */
946 if (_audio_processor) {
947 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
952 _audio_merger.push (content_audio.audio, time);
953 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
/* Record how far this stream has pushed, so pass() knows what is safe to pull */
954 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/** Handler for a bitmap subtitle starting: apply the content's offsets and
    scales, rasterise to container size and register it as active. */
958 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
960 shared_ptr<Piece> piece = wp.lock ();
961 shared_ptr<const TextContent> text = wc.lock ();
962 if (!piece || !text) {
966 /* Apply content's subtitle offsets */
967 subtitle.sub.rectangle.x += text->x_offset ();
968 subtitle.sub.rectangle.y += text->y_offset ();
970 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
971 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
972 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
974 /* Apply content's subtitle scale */
975 subtitle.sub.rectangle.width *= text->x_scale ();
976 subtitle.sub.rectangle.height *= text->y_scale ();
979 shared_ptr<Image> image = subtitle.sub.image;
981 /* We will scale the subtitle up to fit _video_container_size */
982 int const width = subtitle.sub.rectangle.width * _video_container_size.width;
983 int const height = subtitle.sub.rectangle.height * _video_container_size.height;
/* Degenerate rectangle: nothing to draw */
984 if (width == 0 || height == 0) {
988 dcp::Size scaled_size (width, height);
989 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
990 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
/* Register with the active-texts tracker; it is closed later by subtitle_stop() */
992 _active_texts[text->type()].add_from (wc, ps, from);
/** Handler for a string (plain-text) subtitle starting: apply the content's
    position/scale settings to each SubtitleString and register it as active. */
996 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
998 shared_ptr<Piece> piece = wp.lock ();
999 shared_ptr<const TextContent> text = wc.lock ();
1000 if (!piece || !text) {
1005 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Starts after the content's end: ignore */
1007 if (from > piece->content->end(_film)) {
1011 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
1012 s.set_h_position (s.h_position() + text->x_offset ());
1013 s.set_v_position (s.v_position() + text->y_offset ());
1014 float const xs = text->x_scale();
1015 float const ys = text->y_scale();
1016 float size = s.size();
1018 /* Adjust size to express the common part of the scaling;
1019 e.g. if xs = ys = 0.5 we scale size by 2.
1021 if (xs > 1e-5 && ys > 1e-5) {
1022 size *= 1 / min (1 / xs, 1 / ys);
1026 /* Then express aspect ratio changes */
1027 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1028 s.set_aspect_adjust (xs / ys);
/* Subtitle in-time uses millisecond resolution (editable-time rate 1000) */
1031 s.set_in (dcp::Time(from.seconds(), 1000));
1032 ps.string.push_back (StringText (s, text->outline_width()));
1033 ps.add_fonts (text->fonts ());
/* Register with the active-texts tracker; closed later by subtitle_stop() */
1036 _active_texts[text->type()].add_from (wc, ps, from);
/** Handler for a subtitle/caption ending at content time @p to: close the
    matching active text and, if it is not to be burnt in, emit it via the
    Text signal. */
1040 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1042 shared_ptr<const TextContent> text = wc.lock ();
/* No matching start was registered; nothing to stop */
1047 if (!_active_texts[text->type()].have(wc)) {
1051 shared_ptr<Piece> piece = wp.lock ();
1056 DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ends after the content's end: ignore */
1058 if (dcp_to > piece->content->end(_film)) {
1062 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
/* Burnt-in subtitles are composited onto video in do_emit_video() instead */
1064 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1065 if (text->use() && !always && !text->burn()) {
1066 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/** Seek the player to @p time.
    @param accurate  true to decode exactly from @p time; false allows
                     decoders to start from a convenient earlier point. */
1071 Player::seek (DCPTime time, bool accurate)
1073 boost::mutex::scoped_lock lm (_mutex);
1076 /* We can't seek in this state */
/* Drop any partially-assembled 3D frames */
1081 _shuffler->clear ();
1086 if (_audio_processor) {
1087 _audio_processor->flush ();
/* Discard merged-but-unemitted audio and any active texts */
1090 _audio_merger.clear ();
1091 for (int i = 0; i < TEXT_COUNT; ++i) {
1092 _active_texts[i].clear ();
1095 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1096 if (time < i->content->position()) {
1097 /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
1098 we must seek this (following) content accurately, otherwise when we come to the end of the current
1099 content we may not start right at the beginning of the next, causing a gap (if the next content has
1100 been trimmed to a point between keyframes, or something).
1102 i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
1104 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1105 /* During; seek to position */
1106 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1109 /* After; this piece is done */
/* For accurate seeks, set the emission clocks so output resumes exactly at
   @p time; for inaccurate ones leave them unset until data arrives */
1115 _last_video_time = time;
1116 _last_video_eyes = EYES_LEFT;
1117 _last_audio_time = time;
1119 _last_video_time = optional<DCPTime>();
1120 _last_video_eyes = optional<Eyes>();
1121 _last_audio_time = optional<DCPTime>();
1124 _black.set_position (time);
1125 _silent.set_position (time);
/* Forget the last-seen frames used for gap filling */
1127 _last_video.clear ();
/** Queue a video frame for emission, advancing the video clock.  Frames are
    held in a small delay queue so that subtitles have time to arrive before
    the frame they belong to is emitted. */
1131 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1133 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1134 player before the video that requires them.
1136 _delay.push_back (make_pair (pv, time));
/* The clock only advances once both eyes (or a 2D frame) have been queued */
1138 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1139 _last_video_time = time + one_video_frame();
1141 _last_video_eyes = increment_eyes (pv->eyes());
/* Keep up to two frames queued; emit the oldest once a third arrives */
1143 if (_delay.size() < 3) {
1147 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1149 do_emit_video (to_do.first, to_do.second);
/** Actually emit a video frame: expire finished texts, composite any open
    subtitles that must be burnt in, then fire the Video signal. */
1153 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
/* Only expire texts once per frame time, i.e. after the right eye / 2D frame */
1155 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1156 for (int i = 0; i < TEXT_COUNT; ++i) {
1157 _active_texts[i].clear_before (time);
1161 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1163 pv->set_text (subtitles.get ());
/** Emit an audio block via the Audio signal and advance the audio clock.
    Asserts that the block follows on (to within one sample) from the
    previously emitted audio. */
1170 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1172 /* Log if the assert below is about to fail */
1173 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1174 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1177 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1178 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1179 Audio (data, time, _film->audio_frame_rate());
1180 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/** Emit silence covering @p period, in blocks of at most half a second. */
1184 Player::fill_audio (DCPTimePeriod period)
/* Empty period: nothing to do */
1186 if (period.from == period.to) {
1190 DCPOMATIC_ASSERT (period.from < period.to);
1192 DCPTime t = period.from;
1193 while (t < period.to) {
1194 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1195 Frame const samples = block.frames_round(_film->audio_frame_rate());
1197 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1198 silence->make_silent ();
1199 emit_audio (silence, t);
/** @return The duration of one video frame at the film's frame rate. */
1206 Player::one_video_frame () const
1208 return DCPTime::from_frames (1, _film->video_frame_rate ());

/** Discard the part of @p audio that falls before @p discard_to.
    @return The remaining audio (null if all was discarded) and the DCP time
            at which it starts. */
1211 pair<shared_ptr<AudioBuffers>, DCPTime>
1212 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1214 DCPTime const discard_time = discard_to - time;
1215 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1216 Frame remaining_frames = audio->frames() - discard_frames;
1217 if (remaining_frames <= 0) {
1218 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1220 shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1221 return make_pair(cut, time + discard_time);
/** Set a JPEG2000 decode resolution reduction for DCP content
    (boost::none for full resolution), rebuilding pieces if it changed. */
1225 Player::set_dcp_decode_reduction (optional<int> reduction)
1227 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1230 boost::mutex::scoped_lock lm (_mutex);
/* No-op: cancel the pending change rather than announcing DONE */
1232 if (reduction == _dcp_decode_reduction) {
1234 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1238 _dcp_decode_reduction = reduction;
1239 setup_pieces_unlocked ();
1242 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/** Public lookup: convert @p t within @p content to a DCP time, if the
    content is currently part of our pieces. */
1246 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1248 boost::mutex::scoped_lock lm (_mutex);
1250 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1251 if (i->content == content) {
1252 return content_time_to_dcp (i, t);
1256 /* We couldn't find this content; perhaps things are being changed over */
1257 return optional<DCPTime>();

/** @return The playlist we are using: the explicit one given at
    construction, or else the film's own. */
1261 shared_ptr<const Playlist>
1262 Player::playlist () const
1264 return _playlist ? _playlist : _film->playlist();