2 Copyright (C) 2013-2019 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <dcp/reel_closed_caption_asset.h>
57 #include <boost/foreach.hpp>
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79 using namespace dcpomatic;
/* Property codes passed with the Change signal so observers can tell which
   aspect of the player's output changed. */
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Constructor: wire the Player to its Film and Playlist.
   NOTE(review): this listing is garbled — several initializer lines (e.g. the
   one initialising _film from `film`) and braces are missing from view, so the
   code below is left byte-identical rather than reconstructed. */
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 , _playlist (playlist)
91 , _ignore_video (false)
92 , _ignore_audio (false)
93 , _ignore_text (false)
94 , _always_burn_open_subtitles (false)
96 , _tolerant (film->tolerant())
97 , _play_referenced (false)
98 , _audio_merger (_film->audio_frame_rate())
101 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
102 /* The butler must hear about this first, so since we are proxying this through to the butler we must
105 _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
106 _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
/* Match the output frame size to the film's container before first use */
107 set_video_container_size (_film->frame_size ());
109 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
/* An accurate seek to zero puts all decoders in a known starting state */
112 seek (DCPTime (), true);
121 Player::setup_pieces ()
123 boost::mutex::scoped_lock lm (_mutex);
124 setup_pieces_unlocked ();
128 have_video (shared_ptr<Piece> piece)
130 return piece->decoder && piece->decoder->video;
134 have_audio (shared_ptr<Piece> piece)
136 return piece->decoder && piece->decoder->audio;
/* Rebuild _pieces (content + decoder + frame-rate-change triples) from the
   playlist; caller must hold _mutex.
   NOTE(review): this listing is garbled — guard statements (e.g. `continue`
   after the paths_valid / text-only checks, and a null-decoder check around
   the "Not something that we can decode" comment) are missing from view, so
   the code is left byte-identical rather than reconstructed. */
140 Player::setup_pieces_unlocked ()
/* Keep the old pieces so their decoders can be reused below */
142 list<shared_ptr<Piece> > old_pieces = _pieces;
146 _shuffler = new Shuffler();
147 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
149 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
151 if (!i->paths_valid ()) {
155 if (_ignore_video && _ignore_audio && i->text.empty()) {
156 /* We're only interested in text and this content has none */
/* Reuse an existing decoder for this content if we made one last time */
160 shared_ptr<Decoder> old_decoder;
161 BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
162 if (j->content == i) {
163 old_decoder = j->decoder;
168 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
169 FrameRateChange frc (_film, i);
172 /* Not something that we can decode; e.g. Atmos content */
176 if (decoder->video && _ignore_video) {
177 decoder->video->set_ignore (true);
180 if (decoder->audio && _ignore_audio) {
181 decoder->audio->set_ignore (true);
185 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
186 i->set_ignore (true);
190 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
192 dcp->set_decode_referenced (_play_referenced);
193 if (_play_referenced) {
194 dcp->set_forced_reduction (_dcp_decode_reduction);
198 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
199 _pieces.push_back (piece);
201 if (decoder->video) {
202 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
203 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
204 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
206 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
210 if (decoder->audio) {
211 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Connect each text decoder's start/stop signals through to this Player */
214 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
216 while (j != decoder->text.end()) {
217 (*j)->BitmapStart.connect (
218 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
220 (*j)->PlainStart.connect (
221 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
224 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
/* Record, per audio stream, which piece it belongs to and where pushing starts */
231 _stream_states.clear ();
232 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
233 if (i->content->audio) {
234 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
235 _stream_states[j] = StreamState (i, i->content->position ());
/* _black/_silent describe the periods with no video / no audio to fill */
240 _black = Empty (_film, _pieces, bind(&have_video, _1));
241 _silent = Empty (_film, _pieces, bind(&have_audio, _1));
243 _last_video_time = DCPTime ();
244 _last_video_eyes = EYES_BOTH;
245 _last_audio_time = DCPTime ();
247 /* Cached value to save recalculating it on every ::pass */
248 _film_length = _film->length ();
/* Respond to a change in some piece of playlist content.
   NOTE(review): this listing is garbled — the statements that set/clear the
   suspension state and the rebuild call in the DONE branch are missing from
   view; code left byte-identical rather than reconstructed. */
252 Player::playlist_content_change (ChangeType type, int property, bool frequent)
254 if (type == CHANGE_TYPE_PENDING) {
255 boost::mutex::scoped_lock lm (_mutex);
256 /* The player content is probably about to change, so we can't carry on
257 until that has happened and we've rebuilt our pieces. Stop pass()
258 and seek() from working until then.
261 } else if (type == CHANGE_TYPE_DONE) {
262 /* A change in our content has gone through. Re-build our pieces. */
265 } else if (type == CHANGE_TYPE_CANCELLED) {
266 boost::mutex::scoped_lock lm (_mutex);
/* Proxy the change on to our own observers */
270 Change (type, property, frequent);
274 Player::set_video_container_size (dcp::Size s)
276 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
279 boost::mutex::scoped_lock lm (_mutex);
281 if (s == _video_container_size) {
283 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
287 _video_container_size = s;
289 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
290 _black_image->make_black ();
293 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
297 Player::playlist_change (ChangeType type)
299 if (type == CHANGE_TYPE_DONE) {
302 Change (type, PlayerProperty::PLAYLIST, false);
306 Player::film_change (ChangeType type, Film::Property p)
308 /* Here we should notice Film properties that affect our output, and
309 alert listeners that our output now would be different to how it was
310 last time we were run.
313 if (p == Film::CONTAINER) {
314 Change (type, PlayerProperty::FILM_CONTAINER, false);
315 } else if (p == Film::VIDEO_FRAME_RATE) {
316 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
317 so we need new pieces here.
319 if (type == CHANGE_TYPE_DONE) {
322 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
323 } else if (p == Film::AUDIO_PROCESSOR) {
324 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
325 boost::mutex::scoped_lock lm (_mutex);
326 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
328 } else if (p == Film::AUDIO_CHANNELS) {
329 if (type == CHANGE_TYPE_DONE) {
330 boost::mutex::scoped_lock lm (_mutex);
331 _audio_merger.clear ();
336 shared_ptr<PlayerVideo>
337 Player::black_player_video_frame (Eyes eyes) const
339 return shared_ptr<PlayerVideo> (
341 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
344 _video_container_size,
345 _video_container_size,
348 PresetColourConversion::all().front().conversion,
350 boost::weak_ptr<Content>(),
351 boost::optional<Frame>()
357 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
359 DCPTime s = t - piece->content->position ();
360 s = min (piece->content->length_after_trim(_film), s);
361 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
363 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
364 then convert that ContentTime to frames at the content's rate. However this fails for
365 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
366 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
368 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
370 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
374 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
376 /* See comment in dcp_to_content_video */
377 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
378 return d + piece->content->position();
382 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
384 DCPTime s = t - piece->content->position ();
385 s = min (piece->content->length_after_trim(_film), s);
386 /* See notes in dcp_to_content_video */
387 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
391 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
393 /* See comment in dcp_to_content_video */
394 return DCPTime::from_frames (f, _film->audio_frame_rate())
395 - DCPTime (piece->content->trim_start(), piece->frc)
396 + piece->content->position();
400 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
402 DCPTime s = t - piece->content->position ();
403 s = min (piece->content->length_after_trim(_film), s);
404 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
408 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
410 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
413 list<shared_ptr<Font> >
414 Player::get_subtitle_fonts ()
416 boost::mutex::scoped_lock lm (_mutex);
418 list<shared_ptr<Font> > fonts;
419 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
420 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
421 /* XXX: things may go wrong if there are duplicate font IDs
422 with different font files.
424 list<shared_ptr<Font> > f = j->fonts ();
425 copy (f.begin(), f.end(), back_inserter (fonts));
432 /** Set this player never to produce any video data */
434 Player::set_ignore_video ()
436 boost::mutex::scoped_lock lm (_mutex);
437 _ignore_video = true;
438 setup_pieces_unlocked ();
442 Player::set_ignore_audio ()
444 boost::mutex::scoped_lock lm (_mutex);
445 _ignore_audio = true;
446 setup_pieces_unlocked ();
450 Player::set_ignore_text ()
452 boost::mutex::scoped_lock lm (_mutex);
454 setup_pieces_unlocked ();
457 /** Set the player to always burn open texts into the image regardless of the content settings */
459 Player::set_always_burn_open_subtitles ()
461 boost::mutex::scoped_lock lm (_mutex);
462 _always_burn_open_subtitles = true;
465 /** Sets up the player to be faster, possibly at the expense of quality */
469 boost::mutex::scoped_lock lm (_mutex);
471 setup_pieces_unlocked ();
475 Player::set_play_referenced ()
477 boost::mutex::scoped_lock lm (_mutex);
478 _play_referenced = true;
479 setup_pieces_unlocked ();
483 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
485 DCPOMATIC_ASSERT (r);
486 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
487 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
488 if (r->actual_duration() > 0) {
490 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/* Collect the reel assets of any DCP content that is being referenced
   (rather than re-encoded), with their periods in the output DCP.
   NOTE(review): this listing is garbled — guard statements (e.g. skipping
   non-DCP content, and error handling around the DCPDecoder construction)
   are missing from view; code left byte-identical rather than reconstructed. */
495 list<ReferencedReelAsset>
496 Player::get_reel_assets ()
498 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
500 list<ReferencedReelAsset> a;
502 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
503 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
508 scoped_ptr<DCPDecoder> decoder;
510 decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
515 DCPOMATIC_ASSERT (j->video_frame_rate ());
516 double const cfr = j->video_frame_rate().get();
517 Frame const trim_start = j->trim_start().frames_round (cfr);
518 Frame const trim_end = j->trim_end().frames_round (cfr);
519 int const ffr = _film->video_frame_rate ();
521 /* position in the asset from the start */
522 int64_t offset_from_start = 0;
523 /* position in the asset from the end */
524 int64_t offset_from_end = 0;
525 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
526 /* Assume that main picture duration is the length of the reel */
527 offset_from_end += k->main_picture()->actual_duration();
530 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
532 /* Assume that main picture duration is the length of the reel */
533 int64_t const reel_duration = k->main_picture()->actual_duration();
535 /* See doc/design/trim_reels.svg */
536 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
537 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
539 DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
540 if (j->reference_video ()) {
541 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
544 if (j->reference_audio ()) {
545 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
548 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
549 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
552 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
553 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
554 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
/* Advance the per-reel offsets for the next reel */
558 offset_from_start += reel_duration;
559 offset_from_end -= reel_duration;
/* One pass of the player: make the most-behind decoder (or the black/silence
   filler) emit some data, then flush any audio that is known to be complete.
   NOTE(review): this listing is garbled — the function signature (presumably
   `bool Player::pass ()`), the suspended-state guard, the switch over which
   of content/black/silence is earliest, and several returns/braces are
   missing from view; code left byte-identical rather than reconstructed. */
569 boost::mutex::scoped_lock lm (_mutex);
570 DCPOMATIC_ASSERT (_film_length);
573 /* We can't pass in this state */
577 if (*_film_length == DCPTime()) {
578 /* Special case of an empty Film; just give one black frame */
579 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
583 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
585 shared_ptr<Piece> earliest_content;
586 optional<DCPTime> earliest_time;
588 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
593 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
594 if (t > i->content->end(_film)) {
598 /* Given two choices at the same time, pick the one with texts so we see it before
601 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
603 earliest_content = i;
617 if (earliest_content) {
621 if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
622 earliest_time = _black.position ();
626 if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
627 earliest_time = _silent.position ();
634 earliest_content->done = earliest_content->decoder->pass ();
635 shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
636 if (dcp && !_play_referenced && dcp->reference_audio()) {
637 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
638 to `hide' the fact that no audio was emitted during the referenced DCP (though
639 we need to behave as though it was).
641 _last_audio_time = dcp->end (_film);
646 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
647 _black.set_position (_black.position() + one_video_frame());
651 DCPTimePeriod period (_silent.period_at_position());
652 if (_last_audio_time) {
653 /* Sometimes the thing that happened last finishes fractionally before
654 or after this silence. Bodge the start time of the silence to fix it.
655 I think this is nothing to worry about since we will just add or
656 remove a little silence at the end of some content.
658 int64_t const error = labs(period.from.get() - _last_audio_time->get());
659 /* Let's not worry about less than a frame at 24fps */
660 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
661 if (error >= too_much_error) {
662 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
664 DCPOMATIC_ASSERT (error < too_much_error);
665 period.from = *_last_audio_time;
/* Emit at most one video frame's worth of silence per pass */
667 if (period.duration() > one_video_frame()) {
668 period.to = period.from + one_video_frame();
671 _silent.set_position (period.to);
679 /* Emit any audio that is ready */
681 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
682 of our streams, or the position of the _silent.
684 DCPTime pull_to = *_film_length;
685 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
686 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
687 pull_to = i->second.last_push_end;
690 if (!_silent.done() && _silent.position() < pull_to) {
691 pull_to = _silent.position();
694 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
695 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
696 if (_last_audio_time && i->second < *_last_audio_time) {
697 /* This new data comes before the last we emitted (or the last seek); discard it */
698 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
703 } else if (_last_audio_time && i->second > *_last_audio_time) {
704 /* There's a gap between this data and the last we emitted; fill with silence */
705 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
708 emit_audio (i->first, i->second);
/* Flush any video waiting in the subtitle-ordering delay queue */
713 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
714 do_emit_video(i->first, i->second);
/* NOTE(review): this listing is garbled — the loop header over the burnt
   PlayerTexts and the statements that scale/position each bitmap subtitle
   are missing from view; code left byte-identical rather than reconstructed. */
721 /** @return Open subtitles for the frame at the given time, converted to images */
722 optional<PositionImage>
723 Player::open_subtitles_for_frame (DCPTime time) const
725 list<PositionImage> captions;
726 int const vfr = _film->video_frame_rate();
730 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
733 /* Bitmap subtitles */
734 BOOST_FOREACH (BitmapText i, j.bitmap) {
739 /* i.image will already have been scaled to fit _video_container_size */
740 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
/* Rectangle coordinates are proportions of the container; convert to pixels */
746 lrint (_video_container_size.width * i.rectangle.x),
747 lrint (_video_container_size.height * i.rectangle.y)
753 /* String subtitles (rendered to an image) */
754 if (!j.string.empty ()) {
755 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
756 copy (s.begin(), s.end(), back_inserter (captions));
760 if (captions.empty ()) {
761 return optional<PositionImage> ();
764 return merge (captions);
/* Handle a frame of video arriving from a piece's decoder: discard
   out-of-range frames, fill any gap since the last emitted frame (2D or 3D),
   then emit the new frame (repeated per FrameRateChange).
   NOTE(review): this listing is garbled — early-return guards, some braces
   and part of the PlayerVideo construction are missing from view; code left
   byte-identical rather than reconstructed. */
768 Player::video (weak_ptr<Piece> wp, ContentVideo video)
770 shared_ptr<Piece> piece = wp.lock ();
775 FrameRateChange frc (_film, piece->content);
/* When skipping, only every other content frame is wanted */
776 if (frc.skip && (video.frame % 2) == 1) {
780 /* Time of the first frame we will emit */
781 DCPTime const time = content_video_to_dcp (piece, video.frame);
783 /* Discard if it's before the content's period or the last accurate seek. We can't discard
784 if it's after the content's period here as in that case we still need to fill any gap between
785 `now' and the end of the content's period.
787 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
791 /* Fill gaps that we discover now that we have some video which needs to be emitted.
792 This is where we need to fill to.
794 DCPTime fill_to = min (time, piece->content->end(_film));
796 if (_last_video_time) {
797 DCPTime fill_from = max (*_last_video_time, piece->content->position());
799 /* Fill if we have more than half a frame to do */
800 if ((fill_to - fill_from) > one_video_frame() / 2) {
801 LastVideoMap::const_iterator last = _last_video.find (wp);
802 if (_film->three_d()) {
803 Eyes fill_to_eyes = video.eyes;
804 if (fill_to_eyes == EYES_BOTH) {
805 fill_to_eyes = EYES_LEFT;
807 if (fill_to == piece->content->end(_film)) {
808 /* Don't fill after the end of the content */
809 fill_to_eyes = EYES_LEFT;
811 DCPTime j = fill_from;
812 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
813 if (eyes == EYES_BOTH) {
/* Emit L/R pairs, reusing the last frame when we have one, until fill_to */
816 while (j < fill_to || eyes != fill_to_eyes) {
817 if (last != _last_video.end()) {
818 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
819 copy->set_eyes (eyes);
820 emit_video (copy, j);
822 emit_video (black_player_video_frame(eyes), j);
824 if (eyes == EYES_RIGHT) {
825 j += one_video_frame();
827 eyes = increment_eyes (eyes);
830 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
831 if (last != _last_video.end()) {
832 emit_video (last->second, j);
834 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Cache the new frame as a PlayerVideo keyed by the piece */
841 _last_video[wp].reset (
844 piece->content->video->crop (),
845 piece->content->video->fade (_film, video.frame),
846 piece->content->video->scale().size (
847 piece->content->video, _video_container_size, _film->frame_size ()
849 _video_container_size,
852 piece->content->video->colour_conversion(),
853 piece->content->video->range(),
/* Emit the frame, repeating it as required by the frame rate change */
860 for (int i = 0; i < frc.repeat; ++i) {
861 if (t < piece->content->end(_film)) {
862 emit_video (_last_video[wp], t);
864 t += one_video_frame ();
/* Handle a block of audio arriving from a piece's decoder: trim it to the
   content's period, apply gain/remap/processing, then push to the merger.
   NOTE(review): this listing is garbled — early-return guards and some
   braces are missing from view; code left byte-identical rather than
   reconstructed. */
869 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
871 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
873 shared_ptr<Piece> piece = wp.lock ();
878 shared_ptr<AudioContent> content = piece->content->audio;
879 DCPOMATIC_ASSERT (content);
881 int const rfr = content->resampled_frame_rate (_film);
883 /* Compute time in the DCP */
884 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
885 /* And the end of this block in the DCP */
886 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
888 /* Remove anything that comes before the start or after the end of the content */
889 if (time < piece->content->position()) {
890 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
892 /* This audio is entirely discarded */
895 content_audio.audio = cut.first;
897 } else if (time > piece->content->end(_film)) {
900 } else if (end > piece->content->end(_film)) {
901 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
902 if (remaining_frames == 0) {
905 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
906 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
907 content_audio.audio = cut;
910 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain, working on a copy of the buffers */
914 if (content->gain() != 0) {
915 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
916 gain->apply_gain (content->gain ());
917 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout */
922 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
926 if (_audio_processor) {
927 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
932 _audio_merger.push (content_audio.audio, time);
933 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
934 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
938 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
940 shared_ptr<Piece> piece = wp.lock ();
941 shared_ptr<const TextContent> text = wc.lock ();
942 if (!piece || !text) {
946 /* Apply content's subtitle offsets */
947 subtitle.sub.rectangle.x += text->x_offset ();
948 subtitle.sub.rectangle.y += text->y_offset ();
950 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
951 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
952 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
954 /* Apply content's subtitle scale */
955 subtitle.sub.rectangle.width *= text->x_scale ();
956 subtitle.sub.rectangle.height *= text->y_scale ();
959 shared_ptr<Image> image = subtitle.sub.image;
960 /* We will scale the subtitle up to fit _video_container_size */
961 dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
962 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
963 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
965 _active_texts[text->type()].add_from (wc, ps, from);
969 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
971 shared_ptr<Piece> piece = wp.lock ();
972 shared_ptr<const TextContent> text = wc.lock ();
973 if (!piece || !text) {
978 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
980 if (from > piece->content->end(_film)) {
984 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
985 s.set_h_position (s.h_position() + text->x_offset ());
986 s.set_v_position (s.v_position() + text->y_offset ());
987 float const xs = text->x_scale();
988 float const ys = text->y_scale();
989 float size = s.size();
991 /* Adjust size to express the common part of the scaling;
992 e.g. if xs = ys = 0.5 we scale size by 2.
994 if (xs > 1e-5 && ys > 1e-5) {
995 size *= 1 / min (1 / xs, 1 / ys);
999 /* Then express aspect ratio changes */
1000 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1001 s.set_aspect_adjust (xs / ys);
1004 s.set_in (dcp::Time(from.seconds(), 1000));
1005 ps.string.push_back (StringText (s, text->outline_width()));
1006 ps.add_fonts (text->fonts ());
1009 _active_texts[text->type()].add_from (wc, ps, from);
1013 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1015 shared_ptr<const TextContent> text = wc.lock ();
1020 if (!_active_texts[text->type()].have(wc)) {
1024 shared_ptr<Piece> piece = wp.lock ();
1029 DCPTime const dcp_to = content_time_to_dcp (piece, to);
1031 if (dcp_to > piece->content->end(_film)) {
1035 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1037 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1038 if (text->use() && !always && !text->burn()) {
1039 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1044 Player::seek (DCPTime time, bool accurate)
1046 boost::mutex::scoped_lock lm (_mutex);
1049 /* We can't seek in this state */
1054 _shuffler->clear ();
1059 if (_audio_processor) {
1060 _audio_processor->flush ();
1063 _audio_merger.clear ();
1064 for (int i = 0; i < TEXT_COUNT; ++i) {
1065 _active_texts[i].clear ();
1068 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1069 if (time < i->content->position()) {
1070 /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
1071 we must seek this (following) content accurately, otherwise when we come to the end of the current
1072 content we may not start right at the beginning of the next, causing a gap (if the next content has
1073 been trimmed to a point between keyframes, or something).
1075 i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
1077 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1078 /* During; seek to position */
1079 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1082 /* After; this piece is done */
1088 _last_video_time = time;
1089 _last_video_eyes = EYES_LEFT;
1090 _last_audio_time = time;
1092 _last_video_time = optional<DCPTime>();
1093 _last_video_eyes = optional<Eyes>();
1094 _last_audio_time = optional<DCPTime>();
1097 _black.set_position (time);
1098 _silent.set_position (time);
1100 _last_video.clear ();
1104 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1106 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1107 player before the video that requires them.
1109 _delay.push_back (make_pair (pv, time));
1111 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1112 _last_video_time = time + one_video_frame();
1114 _last_video_eyes = increment_eyes (pv->eyes());
1116 if (_delay.size() < 3) {
1120 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1122 do_emit_video (to_do.first, to_do.second);
1126 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1128 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1129 for (int i = 0; i < TEXT_COUNT; ++i) {
1130 _active_texts[i].clear_before (time);
1134 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1136 pv->set_text (subtitles.get ());
1143 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1145 /* Log if the assert below is about to fail */
1146 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1147 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1150 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1151 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1152 Audio (data, time, _film->audio_frame_rate());
1153 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1157 Player::fill_audio (DCPTimePeriod period)
1159 if (period.from == period.to) {
1163 DCPOMATIC_ASSERT (period.from < period.to);
1165 DCPTime t = period.from;
1166 while (t < period.to) {
1167 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1168 Frame const samples = block.frames_round(_film->audio_frame_rate());
1170 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1171 silence->make_silent ();
1172 emit_audio (silence, t);
1179 Player::one_video_frame () const
1181 return DCPTime::from_frames (1, _film->video_frame_rate ());
1184 pair<shared_ptr<AudioBuffers>, DCPTime>
1185 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1187 DCPTime const discard_time = discard_to - time;
1188 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1189 Frame remaining_frames = audio->frames() - discard_frames;
1190 if (remaining_frames <= 0) {
1191 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1193 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1194 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1195 return make_pair(cut, time + discard_time);
1199 Player::set_dcp_decode_reduction (optional<int> reduction)
1201 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1204 boost::mutex::scoped_lock lm (_mutex);
1206 if (reduction == _dcp_decode_reduction) {
1208 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1212 _dcp_decode_reduction = reduction;
1213 setup_pieces_unlocked ();
1216 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1220 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1222 boost::mutex::scoped_lock lm (_mutex);
1224 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1225 if (i->content == content) {
1226 return content_time_to_dcp (i, t);
1230 /* We couldn't find this content; perhaps things are being changed over */
1231 return optional<DCPTime>();