2 Copyright (C) 2013-2019 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <dcp/reel_closed_caption_asset.h>
57 #include <boost/foreach.hpp>
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79 using namespace dcpomatic;
/* Property IDs emitted with the Player's Change signal so listeners can tell
   what aspect of the player's output has (or is about to) change. */
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Construct a Player for the given film/playlist: wire up change signals,
   size the video container from the film and do an initial accurate seek to zero.
   NOTE(review): several initializer-list and body lines appear to be missing
   from this extract (e.g. the _film initializer) — confirm against the full file. */
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 , _playlist (playlist)
91 , _ignore_video (false)
92 , _ignore_audio (false)
93 , _ignore_text (false)
94 , _always_burn_open_subtitles (false)
96 , _tolerant (film->tolerant())
97 , _play_referenced (false)
98 , _audio_merger (_film->audio_frame_rate())
101 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
102 /* The butler must hear about this first, so since we are proxying this through to the butler we must
105 _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
106 _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
107 set_video_container_size (_film->frame_size ());
109 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
/* Seek to start so that we are set up to `pass' from the beginning */
112 seek (DCPTime (), true);
/* Public wrapper: take the state mutex and rebuild the list of Pieces. */
121 Player::setup_pieces ()
123 boost::mutex::scoped_lock lm (_mutex);
124 setup_pieces_unlocked ();
/* Predicate used when building _black: true if this piece has a video decoder. */
128 have_video (shared_ptr<Piece> piece)
130 return piece->decoder && piece->decoder->video;
/* Predicate used when building _silent: true if this piece has an audio decoder. */
134 have_audio (shared_ptr<Piece> piece)
136 return piece->decoder && piece->decoder->audio;
/* Rebuild _pieces from the playlist: create a decoder per usable content item
   (re-using old decoders where the content is unchanged), connect decoder
   signals to the Player's handlers, and reset per-pass state.
   Caller must hold _mutex.
   NOTE(review): `continue' statements and closing braces for several of these
   conditionals are missing from this extract — confirm against the full file. */
140 Player::setup_pieces_unlocked ()
142 list<shared_ptr<Piece> > old_pieces = _pieces;
146 _shuffler = new Shuffler();
147 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
149 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files are missing */
151 if (!i->paths_valid ()) {
155 if (_ignore_video && _ignore_audio && i->text.empty()) {
156 /* We're only interested in text and this content has none */
/* Find any decoder we made for this content last time round, so its state can be re-used */
160 shared_ptr<Decoder> old_decoder;
161 BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
162 if (j->content == i) {
163 old_decoder = j->decoder;
168 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
169 FrameRateChange frc (_film, i);
172 /* Not something that we can decode; e.g. Atmos content */
/* Apply the player-wide ignore flags to the new decoder */
176 if (decoder->video && _ignore_video) {
177 decoder->video->set_ignore (true);
180 if (decoder->audio && _ignore_audio) {
181 decoder->audio->set_ignore (true);
185 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
186 i->set_ignore (true);
/* DCP content needs to know whether referenced assets should actually be decoded */
190 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
192 dcp->set_decode_referenced (_play_referenced);
193 if (_play_referenced) {
194 dcp->set_forced_reduction (_dcp_decode_reduction);
198 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
199 _pieces.push_back (piece);
201 if (decoder->video) {
202 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
203 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
204 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
206 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
210 if (decoder->audio) {
211 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Connect each text decoder's start/stop signals, passing weak pointers so the
   handlers can cope with the piece/content disappearing */
214 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
216 while (j != decoder->text.end()) {
217 (*j)->BitmapStart.connect (
218 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
220 (*j)->PlainStart.connect (
221 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
224 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
/* Record per-audio-stream state, starting each stream at its content's position */
231 _stream_states.clear ();
232 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
233 if (i->content->audio) {
234 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
235 _stream_states[j] = StreamState (i, i->content->position ());
/* Empty trackers for the periods with no video / no audio */
240 _black = Empty (_film, _pieces, bind(&have_video, _1));
241 _silent = Empty (_film, _pieces, bind(&have_audio, _1));
243 _last_video_time = DCPTime ();
244 _last_video_eyes = EYES_BOTH;
245 _last_audio_time = DCPTime ();
247 /* Cached value to save recalculating it on every ::pass */
248 _film_length = _film->length ();
/* Handler for ContentChange from the playlist: suspend around a pending change,
   rebuild pieces when it completes, then forward the notification to our own
   Change signal. */
252 Player::playlist_content_change (ChangeType type, int property, bool frequent)
254 if (type == CHANGE_TYPE_PENDING) {
255 /* The player content is probably about to change, so we can't carry on
256 until that has happened and we've rebuilt our pieces. Stop pass()
257 and seek() from working until then.
260 } else if (type == CHANGE_TYPE_DONE) {
261 /* A change in our content has gone through. Re-build our pieces. */
264 } else if (type == CHANGE_TYPE_CANCELLED) {
/* Proxy the notification through to our listeners */
268 Change (type, property, frequent);
/* Set the size of the video `container' that player video is scaled into,
   rebuilding the cached black frame to match.  Emits PENDING then either
   CANCELLED (no-op) or DONE on the Change signal. */
272 Player::set_video_container_size (dcp::Size s)
274 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
277 boost::mutex::scoped_lock lm (_mutex);
/* Nothing to do if the size is unchanged */
279 if (s == _video_container_size) {
281 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
285 _video_container_size = s;
/* Re-make the black frame at the new size */
287 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
288 _black_image->make_black ();
291 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Handler for Change from the playlist; re-forwarded as a PLAYLIST property change. */
295 Player::playlist_change (ChangeType type)
297 if (type == CHANGE_TYPE_DONE) {
300 Change (type, PlayerProperty::PLAYLIST, false);
/* Handler for Change from the Film: react to film properties that alter the
   player's output (container, frame rate, audio processor, channel count). */
304 Player::film_change (ChangeType type, Film::Property p)
306 /* Here we should notice Film properties that affect our output, and
307 alert listeners that our output now would be different to how it was
308 last time we were run.
311 if (p == Film::CONTAINER) {
312 Change (type, PlayerProperty::FILM_CONTAINER, false);
313 } else if (p == Film::VIDEO_FRAME_RATE) {
314 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
315 so we need new pieces here.
317 if (type == CHANGE_TYPE_DONE) {
320 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
321 } else if (p == Film::AUDIO_PROCESSOR) {
/* Make a fresh clone of the film's audio processor at our audio rate */
322 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
323 boost::mutex::scoped_lock lm (_mutex);
324 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
326 } else if (p == Film::AUDIO_CHANNELS) {
/* Channel count changed: any merged-but-unemitted audio is now invalid */
327 if (type == CHANGE_TYPE_DONE) {
328 boost::mutex::scoped_lock lm (_mutex);
329 _audio_merger.clear ();
334 shared_ptr<PlayerVideo>
/* Make a PlayerVideo wrapping the cached black image, for the given eyes,
   sized to fill the video container. */
335 Player::black_player_video_frame (Eyes eyes) const
337 return shared_ptr<PlayerVideo> (
339 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
342 _video_container_size,
343 _video_container_size,
346 PresetColourConversion::all().front().conversion,
/* No originating content / frame for a synthesised black frame */
348 boost::weak_ptr<Content>(),
349 boost::optional<Frame>()
/* Convert a DCP time to a frame index within the given piece's video content,
   accounting for the piece's position, trim and frame-rate change. */
355 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
357 DCPTime s = t - piece->content->position ();
358 s = min (piece->content->length_after_trim(_film), s);
359 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
361 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
362 then convert that ContentTime to frames at the content's rate. However this fails for
363 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
364 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
366 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
368 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: convert a content video frame index to DCP time. */
372 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
374 /* See comment in dcp_to_content_video */
375 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
376 return d + piece->content->position();
/* Convert a DCP time to a frame index in the piece's resampled audio
   (i.e. at the film's audio frame rate). */
380 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
382 DCPTime s = t - piece->content->position ();
383 s = min (piece->content->length_after_trim(_film), s);
384 /* See notes in dcp_to_content_video */
385 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: resampled audio frame index back to DCP time. */
389 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
391 /* See comment in dcp_to_content_video */
392 return DCPTime::from_frames (f, _film->audio_frame_rate())
393 - DCPTime (piece->content->trim_start(), piece->frc)
394 + piece->content->position();
/* Convert a DCP time to a ContentTime within the piece, clamped to the
   content's trimmed length and never negative. */
398 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
400 DCPTime s = t - piece->content->position ();
401 s = min (piece->content->length_after_trim(_film), s);
402 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Convert a ContentTime within the piece to DCP time, clamped to be non-negative. */
406 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
408 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
411 list<shared_ptr<Font> >
/* Collect the fonts of every text content in every piece (duplicates are not removed). */
412 Player::get_subtitle_fonts ()
414 boost::mutex::scoped_lock lm (_mutex);
416 list<shared_ptr<Font> > fonts;
417 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
418 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
419 /* XXX: things may go wrong if there are duplicate font IDs
420 with different font files.
422 list<shared_ptr<Font> > f = j->fonts ();
423 copy (f.begin(), f.end(), back_inserter (fonts));
430 /** Set this player never to produce any video data */
432 Player::set_ignore_video ()
434 boost::mutex::scoped_lock lm (_mutex);
435 _ignore_video = true;
/* Rebuild pieces so the new flag is applied to the decoders */
436 setup_pieces_unlocked ();
/* Set this player never to produce any audio data. */
440 Player::set_ignore_audio ()
442 boost::mutex::scoped_lock lm (_mutex);
443 _ignore_audio = true;
/* Rebuild pieces so the new flag is applied to the decoders */
444 setup_pieces_unlocked ();
/* Set this player never to produce any text data.
   NOTE(review): the `_ignore_text = true' assignment appears to be missing from
   this extract — confirm against the full file. */
448 Player::set_ignore_text ()
450 boost::mutex::scoped_lock lm (_mutex);
452 setup_pieces_unlocked ();
455 /** Set the player to always burn open texts into the image regardless of the content settings */
457 Player::set_always_burn_open_subtitles ()
459 boost::mutex::scoped_lock lm (_mutex);
460 _always_burn_open_subtitles = true;
463 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function signature (presumably Player::set_fast) and the
   flag assignment appear to be missing from this extract — confirm. */
467 boost::mutex::scoped_lock lm (_mutex);
469 setup_pieces_unlocked ();
/* Set the player to decode content that is referenced from DCPs rather than skipping it. */
473 Player::set_play_referenced ()
475 boost::mutex::scoped_lock lm (_mutex);
476 _play_referenced = true;
/* Rebuild pieces so DCP decoders pick up the new setting */
477 setup_pieces_unlocked ();
/* Apply reel-level trims to a reel asset and, if anything remains after trimming,
   append it to the list `a' with its DCP period starting at `from'.
   @param ffr film (DCP) video frame rate used to compute the period length. */
481 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
483 DCPOMATIC_ASSERT (r);
/* Shift the entry point and shorten the duration by the trims */
484 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
485 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
486 if (r->actual_duration() > 0) {
488 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
493 list<ReferencedReelAsset>
/* Build the list of reel assets (picture/sound/subtitle/closed-caption) that are
   referenced from DCP content in the playlist, with each asset trimmed to match
   the content's trims and positioned on the film's timeline. */
494 Player::get_reel_assets ()
496 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
498 list<ReferencedReelAsset> a;
500 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can reference assets */
501 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
506 scoped_ptr<DCPDecoder> decoder;
508 decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
513 DCPOMATIC_ASSERT (j->video_frame_rate ());
/* Trims expressed in frames at the content's own rate */
514 double const cfr = j->video_frame_rate().get();
515 Frame const trim_start = j->trim_start().frames_round (cfr);
516 Frame const trim_end = j->trim_end().frames_round (cfr);
517 int const ffr = _film->video_frame_rate ();
519 /* position in the asset from the start */
520 int64_t offset_from_start = 0;
521 /* position in the asset from the end */
522 int64_t offset_from_end = 0;
/* First pass: total duration, so offset_from_end can count down */
523 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
524 /* Assume that main picture duration is the length of the reel */
525 offset_from_end += k->main_picture()->actual_duration();
528 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
530 /* Assume that main picture duration is the length of the reel */
531 int64_t const reel_duration = k->main_picture()->actual_duration();
533 /* See doc/design/trim_reels.svg */
534 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
535 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
537 DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
/* Add whichever asset types this content says should be referenced */
538 if (j->reference_video ()) {
539 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
542 if (j->reference_audio ()) {
543 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
546 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
547 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
550 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
551 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
552 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
/* Advance the running offsets to the next reel */
556 offset_from_start += reel_duration;
557 offset_from_end -= reel_duration;
/* NOTE(review): this appears to be the body of Player::pass() — the signature
   line is missing from this extract; confirm against the full file.
   Make one `pass' of progress: find the earliest decoder (or black/silence
   filler) and make it emit some data, then flush any audio that is known to be
   complete and any delayed video. */
567 boost::mutex::scoped_lock lm (_mutex);
568 DCPOMATIC_ASSERT (_film_length);
571 /* We can't pass in this state */
575 if (*_film_length == DCPTime()) {
576 /* Special case of an empty Film; just give one black frame */
577 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
581 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
583 shared_ptr<Piece> earliest_content;
584 optional<DCPTime> earliest_time;
586 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Where this piece's decoder currently is, on the DCP timeline */
591 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
592 if (t > i->content->end(_film)) {
596 /* Given two choices at the same time, pick the one with texts so we see it before
599 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
601 earliest_content = i;
615 if (earliest_content) {
/* Black/silence fillers take over if they are further behind than any decoder */
619 if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
620 earliest_time = _black.position ();
624 if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
625 earliest_time = _silent.position ();
632 earliest_content->done = earliest_content->decoder->pass ();
633 shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
634 if (dcp && !_play_referenced && dcp->reference_audio()) {
635 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
636 to `hide' the fact that no audio was emitted during the referenced DCP (though
637 we need to behave as though it was).
639 _last_audio_time = dcp->end (_film);
/* Emit one black frame and advance the black filler */
644 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
645 _black.set_position (_black.position() + one_video_frame());
649 DCPTimePeriod period (_silent.period_at_position());
650 if (_last_audio_time) {
651 /* Sometimes the thing that happened last finishes fractionally before
652 or after this silence. Bodge the start time of the silence to fix it.
653 I think this is nothing to worry about since we will just add or
654 remove a little silence at the end of some content.
656 int64_t const error = labs(period.from.get() - _last_audio_time->get());
657 /* Let's not worry about less than a frame at 24fps */
658 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
659 if (error >= too_much_error) {
660 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
662 DCPOMATIC_ASSERT (error < too_much_error);
663 period.from = *_last_audio_time;
/* Cap each chunk of emitted silence at one video frame's worth */
665 if (period.duration() > one_video_frame()) {
666 period.to = period.from + one_video_frame();
669 _silent.set_position (period.to);
677 /* Emit any audio that is ready */
679 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
680 of our streams, or the position of the _silent.
682 DCPTime pull_to = *_film_length;
683 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
684 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
685 pull_to = i->second.last_push_end;
688 if (!_silent.done() && _silent.position() < pull_to) {
689 pull_to = _silent.position();
692 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
693 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
694 if (_last_audio_time && i->second < *_last_audio_time) {
695 /* This new data comes before the last we emitted (or the last seek); discard it */
696 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
701 } else if (_last_audio_time && i->second > *_last_audio_time) {
702 /* There's a gap between this data and the last we emitted; fill with silence */
703 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
706 emit_audio (i->first, i->second);
/* Flush any video still held in the subtitle-sync delay queue */
711 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
712 do_emit_video(i->first, i->second);
719 /** @return Open subtitles for the frame at the given time, converted to images */
720 optional<PositionImage>
721 Player::open_subtitles_for_frame (DCPTime time) const
723 list<PositionImage> captions;
724 int const vfr = _film->video_frame_rate();
/* All open subtitles active during this one-frame period, honouring the
   always-burn override */
728 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
731 /* Bitmap subtitles */
732 BOOST_FOREACH (BitmapText i, j.bitmap) {
737 /* i.image will already have been scaled to fit _video_container_size */
738 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
/* Rectangle coordinates are proportions of the container; convert to pixels */
744 lrint (_video_container_size.width * i.rectangle.x),
745 lrint (_video_container_size.height * i.rectangle.y)
751 /* String subtitles (rendered to an image) */
752 if (!j.string.empty ()) {
753 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
754 copy (s.begin(), s.end(), back_inserter (captions));
758 if (captions.empty ()) {
759 return optional<PositionImage> ();
762 return merge (captions);
/* Handler for video data arriving from a piece's decoder: convert to DCP time,
   discard out-of-range frames, fill any gap since the last emitted video
   (repeating the last frame or using black, handling 3D eyes), then emit this
   frame, repeated as the FrameRateChange requires. */
766 Player::video (weak_ptr<Piece> wp, ContentVideo video)
768 shared_ptr<Piece> piece = wp.lock ();
773 FrameRateChange frc (_film, piece->content);
/* Skip alternate frames when the content rate is roughly double the DCP rate */
774 if (frc.skip && (video.frame % 2) == 1) {
778 /* Time of the first frame we will emit */
779 DCPTime const time = content_video_to_dcp (piece, video.frame);
781 /* Discard if it's before the content's period or the last accurate seek. We can't discard
782 if it's after the content's period here as in that case we still need to fill any gap between
783 `now' and the end of the content's period.
785 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
789 /* Fill gaps that we discover now that we have some video which needs to be emitted.
790 This is where we need to fill to.
792 DCPTime fill_to = min (time, piece->content->end(_film));
794 if (_last_video_time) {
795 DCPTime fill_from = max (*_last_video_time, piece->content->position());
797 /* Fill if we have more than half a frame to do */
798 if ((fill_to - fill_from) > one_video_frame() / 2) {
799 LastVideoMap::const_iterator last = _last_video.find (wp);
800 if (_film->three_d()) {
/* 3D: fill eye-by-eye up to the eye of the incoming frame */
801 Eyes fill_to_eyes = video.eyes;
802 if (fill_to_eyes == EYES_BOTH) {
803 fill_to_eyes = EYES_LEFT;
805 if (fill_to == piece->content->end(_film)) {
806 /* Don't fill after the end of the content */
807 fill_to_eyes = EYES_LEFT;
809 DCPTime j = fill_from;
810 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
811 if (eyes == EYES_BOTH) {
814 while (j < fill_to || eyes != fill_to_eyes) {
815 if (last != _last_video.end()) {
/* Repeat the last-seen frame with the required eye */
816 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
817 copy->set_eyes (eyes);
818 emit_video (copy, j);
820 emit_video (black_player_video_frame(eyes), j);
/* Time advances only after the right eye has been emitted */
822 if (eyes == EYES_RIGHT) {
823 j += one_video_frame();
825 eyes = increment_eyes (eyes);
/* 2D: one fill frame per video frame period */
828 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
829 if (last != _last_video.end()) {
830 emit_video (last->second, j);
832 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this content frame, applying crop/fade/scale
   and colour settings from the content */
839 _last_video[wp].reset (
842 piece->content->video->crop (),
843 piece->content->video->fade (_film, video.frame),
844 piece->content->video->scale().size (
845 piece->content->video, _video_container_size, _film->frame_size ()
847 _video_container_size,
850 piece->content->video->colour_conversion(),
851 piece->content->video->range(),
/* Emit, repeating the frame as required by the frame-rate change */
858 for (int i = 0; i < frc.repeat; ++i) {
859 if (t < piece->content->end(_film)) {
860 emit_video (_last_video[wp], t);
862 t += one_video_frame ();
/* Handler for audio data arriving from a piece's decoder: compute the DCP
   time, trim to the content's period, apply gain/remap/processing and push
   into the audio merger. */
867 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
869 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
871 shared_ptr<Piece> piece = wp.lock ();
876 shared_ptr<AudioContent> content = piece->content->audio;
877 DCPOMATIC_ASSERT (content);
879 int const rfr = content->resampled_frame_rate (_film);
881 /* Compute time in the DCP */
882 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
883 /* And the end of this block in the DCP */
884 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
886 /* Remove anything that comes before the start or after the end of the content */
887 if (time < piece->content->position()) {
888 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
890 /* This audio is entirely discarded */
893 content_audio.audio = cut.first;
895 } else if (time > piece->content->end(_film)) {
/* Starts after the content has finished: drop it entirely */
898 } else if (end > piece->content->end(_film)) {
/* Overlaps the end of the content: keep only the frames that fit */
899 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
900 if (remaining_frames == 0) {
903 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
904 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
905 content_audio.audio = cut;
908 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting (dB) */
912 if (content->gain() != 0) {
913 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
914 gain->apply_gain (content->gain ());
915 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout */
920 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
/* Run through the film's audio processor, if any */
924 if (_audio_processor) {
925 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
930 _audio_merger.push (content_audio.audio, time);
/* Record how far this stream has been pushed, for pass()'s pull_to calculation */
931 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
932 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for a bitmap subtitle starting: apply the content's offsets/scales
   to the subtitle rectangle, scale the image to the container, and record it
   as active from its DCP start time. */
936 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
938 shared_ptr<Piece> piece = wp.lock ();
939 shared_ptr<const TextContent> text = wc.lock ();
940 if (!piece || !text) {
944 /* Apply content's subtitle offsets */
945 subtitle.sub.rectangle.x += text->x_offset ();
946 subtitle.sub.rectangle.y += text->y_offset ();
948 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
949 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
950 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
952 /* Apply content's subtitle scale */
953 subtitle.sub.rectangle.width *= text->x_scale ();
954 subtitle.sub.rectangle.height *= text->y_scale ();
957 shared_ptr<Image> image = subtitle.sub.image;
958 /* We will scale the subtitle up to fit _video_container_size */
959 dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
960 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
961 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
963 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for a plain (string) subtitle starting: apply the content's
   position/scale settings to each SubtitleString and record the result as
   active from its DCP start time. */
967 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
969 shared_ptr<Piece> piece = wp.lock ();
970 shared_ptr<const TextContent> text = wc.lock ();
971 if (!piece || !text) {
976 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore subtitles which start after the content has finished */
978 if (from > piece->content->end(_film)) {
982 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
983 s.set_h_position (s.h_position() + text->x_offset ());
984 s.set_v_position (s.v_position() + text->y_offset ());
985 float const xs = text->x_scale();
986 float const ys = text->y_scale();
987 float size = s.size();
989 /* Adjust size to express the common part of the scaling;
990 e.g. if xs = ys = 0.5 we scale size by 2.
992 if (xs > 1e-5 && ys > 1e-5) {
993 size *= 1 / min (1 / xs, 1 / ys);
997 /* Then express aspect ratio changes */
998 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
999 s.set_aspect_adjust (xs / ys);
/* Use a 1000-edit-unit time for the `in' point */
1002 s.set_in (dcp::Time(from.seconds(), 1000));
1003 ps.string.push_back (StringText (s, text->outline_width()));
1004 ps.add_fonts (text->fonts ());
1007 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for a subtitle ending: close off the active text at the given time
   and, when the text is not being burnt in, emit it on the Text signal. */
1011 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1013 shared_ptr<const TextContent> text = wc.lock ();
/* Ignore a stop with no matching start */
1018 if (!_active_texts[text->type()].have(wc)) {
1022 shared_ptr<Piece> piece = wp.lock ();
1027 DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ignore stops which land after the content has finished */
1029 if (dcp_to > piece->content->end(_film)) {
1033 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1035 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1036 if (text->use() && !always && !text->burn()) {
1037 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/* Seek the player to the given DCP time.  @param accurate true to seek
   precisely to `time'; false permits seeking to a nearby convenient point.
   Clears buffered audio/texts/video and seeks every piece's decoder. */
1042 Player::seek (DCPTime time, bool accurate)
1044 boost::mutex::scoped_lock lm (_mutex);
1047 /* We can't seek in this state */
1052 _shuffler->clear ();
/* Drop any part-processed audio */
1057 if (_audio_processor) {
1058 _audio_processor->flush ();
1061 _audio_merger.clear ();
1062 for (int i = 0; i < TEXT_COUNT; ++i) {
1063 _active_texts[i].clear ();
1066 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1067 if (time < i->content->position()) {
1068 /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
1069 we must seek this (following) content accurately, otherwise when we come to the end of the current
1070 content we may not start right at the beginning of the next, causing a gap (if the next content has
1071 been trimmed to a point between keyframes, or something).
1073 i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
1075 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1076 /* During; seek to position */
1077 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1080 /* After; this piece is done */
/* For an accurate seek we know exactly where the next video/audio will be */
1086 _last_video_time = time;
1087 _last_video_eyes = EYES_LEFT;
1088 _last_audio_time = time;
1090 _last_video_time = optional<DCPTime>();
1091 _last_video_eyes = optional<Eyes>();
1092 _last_audio_time = optional<DCPTime>();
1095 _black.set_position (time);
1096 _silent.set_position (time);
1098 _last_video.clear ();
/* Queue a video frame for emission, updating _last_video_time/_last_video_eyes.
   Frames are held in a small delay queue so subtitles can arrive first. */
1102 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1104 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1105 player before the video that requires them.
1107 _delay.push_back (make_pair (pv, time));
/* Advance the expected time only once both eyes (or a 2D frame) have gone by */
1109 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1110 _last_video_time = time + one_video_frame();
1112 _last_video_eyes = increment_eyes (pv->eyes());
/* Keep up to 3 frames in the delay queue before actually emitting */
1114 if (_delay.size() < 3) {
1118 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1120 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: expire finished texts, burn in any open
   subtitles for this time, then signal the frame out. */
1124 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1126 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1127 for (int i = 0; i < TEXT_COUNT; ++i) {
1128 _active_texts[i].clear_before (time);
1132 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1134 pv->set_text (subtitles.get ());
/* Emit a block of audio on the Audio signal and advance _last_audio_time.
   Audio must follow on contiguously from the previous emission. */
1141 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1143 /* Log if the assert below is about to fail */
1144 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1145 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1148 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1149 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1150 Audio (data, time, _film->audio_frame_rate());
1151 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence covering the given period, in blocks of at most 0.5s. */
1155 Player::fill_audio (DCPTimePeriod period)
1157 if (period.from == period.to) {
1161 DCPOMATIC_ASSERT (period.from < period.to);
1163 DCPTime t = period.from;
1164 while (t < period.to) {
1165 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1166 Frame const samples = block.frames_round(_film->audio_frame_rate());
1168 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1169 silence->make_silent ();
1170 emit_audio (silence, t);
/* @return the duration of one video frame at the film's video frame rate. */
1177 Player::one_video_frame () const
1179 return DCPTime::from_frames (1, _film->video_frame_rate ());
1182 pair<shared_ptr<AudioBuffers>, DCPTime>
/* Discard the part of `audio' (which starts at `time') that falls before
   `discard_to'.  @return the remaining audio and its new start time; a null
   buffer if everything was discarded. */
1183 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1185 DCPTime const discard_time = discard_to - time;
1186 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1187 Frame remaining_frames = audio->frames() - discard_frames;
1188 if (remaining_frames <= 0) {
1189 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1191 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1192 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1193 return make_pair(cut, time + discard_time);
/* Set the JPEG2000 decode reduction applied to DCP content, rebuilding pieces
   if it changed.  Emits PENDING then CANCELLED (no-op) or DONE. */
1197 Player::set_dcp_decode_reduction (optional<int> reduction)
1199 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1202 boost::mutex::scoped_lock lm (_mutex);
1204 if (reduction == _dcp_decode_reduction) {
1206 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1210 _dcp_decode_reduction = reduction;
/* Rebuild so DCP decoders pick up the new reduction */
1211 setup_pieces_unlocked ();
1214 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Public overload: convert a ContentTime in the given Content to DCP time by
   finding the matching piece.  @return empty if the content is not currently
   in our pieces (e.g. during a rebuild). */
1218 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1220 boost::mutex::scoped_lock lm (_mutex);
1222 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1223 if (i->content == content) {
1224 return content_time_to_dcp (i, t);
1228 /* We couldn't find this content; perhaps things are being changed over */
1229 return optional<DCPTime>();