/*
    Copyright (C) 2013-2019 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <dcp/reel_closed_caption_asset.h>
57 #include <boost/foreach.hpp>
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79 using namespace dcpomatic;
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 , _playlist (playlist)
91 , _ignore_video (false)
92 , _ignore_audio (false)
93 , _ignore_text (false)
94 , _always_burn_open_subtitles (false)
96 , _tolerant (film->tolerant())
97 , _play_referenced (false)
98 , _audio_merger (_film->audio_frame_rate())
101 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
102 /* The butler must hear about this first, so since we are proxying this through to the butler we must
105 _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
106 _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
107 set_video_container_size (_film->frame_size ());
109 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
112 seek (DCPTime (), true);
121 Player::setup_pieces ()
123 boost::mutex::scoped_lock lm (_mutex);
124 setup_pieces_unlocked ();
128 have_video (shared_ptr<Piece> piece)
130 return piece->decoder && piece->decoder->video;
134 have_audio (shared_ptr<Piece> piece)
136 return piece->decoder && piece->decoder->audio;
140 Player::setup_pieces_unlocked ()
142 list<shared_ptr<Piece> > old_pieces = _pieces;
146 _shuffler = new Shuffler();
147 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
149 cout << "SPU " << _playlist->content().size() << ".\n";
151 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
153 if (!i->paths_valid ()) {
154 cout << "not valid.\n";
158 if (_ignore_video && _ignore_audio && i->text.empty()) {
159 cout << "text only.\n";
160 /* We're only interested in text and this content has none */
164 shared_ptr<Decoder> old_decoder;
165 BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
166 if (j->content == i) {
167 old_decoder = j->decoder;
172 cout << " DF " << _tolerant << "\n";
173 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
174 FrameRateChange frc (_film, i);
177 /* Not something that we can decode; e.g. Atmos content */
181 if (decoder->video && _ignore_video) {
182 decoder->video->set_ignore (true);
185 if (decoder->audio && _ignore_audio) {
186 decoder->audio->set_ignore (true);
190 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
191 i->set_ignore (true);
195 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
197 dcp->set_decode_referenced (_play_referenced);
198 if (_play_referenced) {
199 dcp->set_forced_reduction (_dcp_decode_reduction);
203 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
204 _pieces.push_back (piece);
206 if (decoder->video) {
207 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
208 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
209 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
211 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
215 if (decoder->audio) {
216 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
219 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
221 while (j != decoder->text.end()) {
222 (*j)->BitmapStart.connect (
223 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
225 (*j)->PlainStart.connect (
226 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
229 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
236 _stream_states.clear ();
237 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
238 if (i->content->audio) {
239 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
240 _stream_states[j] = StreamState (i, i->content->position ());
245 _black = Empty (_film, _pieces, bind(&have_video, _1));
246 _silent = Empty (_film, _pieces, bind(&have_audio, _1));
248 _last_video_time = DCPTime ();
249 _last_video_eyes = EYES_BOTH;
250 _last_audio_time = DCPTime ();
254 Player::playlist_content_change (ChangeType type, int property, bool frequent)
256 if (type == CHANGE_TYPE_PENDING) {
257 boost::mutex::scoped_lock lm (_mutex);
258 /* The player content is probably about to change, so we can't carry on
259 until that has happened and we've rebuilt our pieces. Stop pass()
260 and seek() from working until then.
263 } else if (type == CHANGE_TYPE_DONE) {
264 /* A change in our content has gone through. Re-build our pieces. */
267 } else if (type == CHANGE_TYPE_CANCELLED) {
268 boost::mutex::scoped_lock lm (_mutex);
272 Change (type, property, frequent);
276 Player::set_video_container_size (dcp::Size s)
278 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
281 boost::mutex::scoped_lock lm (_mutex);
283 if (s == _video_container_size) {
285 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
289 _video_container_size = s;
291 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
292 _black_image->make_black ();
295 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
299 Player::playlist_change (ChangeType type)
301 if (type == CHANGE_TYPE_DONE) {
304 Change (type, PlayerProperty::PLAYLIST, false);
308 Player::film_change (ChangeType type, Film::Property p)
310 /* Here we should notice Film properties that affect our output, and
311 alert listeners that our output now would be different to how it was
312 last time we were run.
315 if (p == Film::CONTAINER) {
316 Change (type, PlayerProperty::FILM_CONTAINER, false);
317 } else if (p == Film::VIDEO_FRAME_RATE) {
318 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
319 so we need new pieces here.
321 if (type == CHANGE_TYPE_DONE) {
324 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
325 } else if (p == Film::AUDIO_PROCESSOR) {
326 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
327 boost::mutex::scoped_lock lm (_mutex);
328 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
330 } else if (p == Film::AUDIO_CHANNELS) {
331 if (type == CHANGE_TYPE_DONE) {
332 boost::mutex::scoped_lock lm (_mutex);
333 _audio_merger.clear ();
338 shared_ptr<PlayerVideo>
339 Player::black_player_video_frame (Eyes eyes) const
341 return shared_ptr<PlayerVideo> (
343 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
346 _video_container_size,
347 _video_container_size,
350 PresetColourConversion::all().front().conversion,
352 boost::weak_ptr<Content>(),
353 boost::optional<Frame>()
359 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
361 DCPTime s = t - piece->content->position ();
362 s = min (piece->content->length_after_trim(_film), s);
363 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
365 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
366 then convert that ContentTime to frames at the content's rate. However this fails for
367 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
368 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
370 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
372 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
376 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
378 /* See comment in dcp_to_content_video */
379 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
380 return d + piece->content->position();
384 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
386 DCPTime s = t - piece->content->position ();
387 s = min (piece->content->length_after_trim(_film), s);
388 /* See notes in dcp_to_content_video */
389 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
393 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
395 /* See comment in dcp_to_content_video */
396 return DCPTime::from_frames (f, _film->audio_frame_rate())
397 - DCPTime (piece->content->trim_start(), piece->frc)
398 + piece->content->position();
402 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
404 DCPTime s = t - piece->content->position ();
405 s = min (piece->content->length_after_trim(_film), s);
406 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
410 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
412 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
415 list<shared_ptr<Font> >
416 Player::get_subtitle_fonts ()
418 boost::mutex::scoped_lock lm (_mutex);
420 list<shared_ptr<Font> > fonts;
421 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
422 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
423 /* XXX: things may go wrong if there are duplicate font IDs
424 with different font files.
426 list<shared_ptr<Font> > f = j->fonts ();
427 copy (f.begin(), f.end(), back_inserter (fonts));
434 /** Set this player never to produce any video data */
436 Player::set_ignore_video ()
438 boost::mutex::scoped_lock lm (_mutex);
439 _ignore_video = true;
440 setup_pieces_unlocked ();
444 Player::set_ignore_audio ()
446 boost::mutex::scoped_lock lm (_mutex);
447 _ignore_audio = true;
448 setup_pieces_unlocked ();
452 Player::set_ignore_text ()
454 boost::mutex::scoped_lock lm (_mutex);
456 setup_pieces_unlocked ();
459 /** Set the player to always burn open texts into the image regardless of the content settings */
461 Player::set_always_burn_open_subtitles ()
463 boost::mutex::scoped_lock lm (_mutex);
464 _always_burn_open_subtitles = true;
467 /** Sets up the player to be faster, possibly at the expense of quality */
471 boost::mutex::scoped_lock lm (_mutex);
473 setup_pieces_unlocked ();
477 Player::set_play_referenced ()
479 boost::mutex::scoped_lock lm (_mutex);
480 _play_referenced = true;
481 setup_pieces_unlocked ();
485 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
487 DCPOMATIC_ASSERT (r);
488 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
489 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
490 if (r->actual_duration() > 0) {
492 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
497 list<ReferencedReelAsset>
498 Player::get_reel_assets ()
500 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
502 list<ReferencedReelAsset> a;
504 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
505 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
510 scoped_ptr<DCPDecoder> decoder;
512 decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
517 DCPOMATIC_ASSERT (j->video_frame_rate ());
518 double const cfr = j->video_frame_rate().get();
519 Frame const trim_start = j->trim_start().frames_round (cfr);
520 Frame const trim_end = j->trim_end().frames_round (cfr);
521 int const ffr = _film->video_frame_rate ();
523 /* position in the asset from the start */
524 int64_t offset_from_start = 0;
525 /* position in the asset from the end */
526 int64_t offset_from_end = 0;
527 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
528 /* Assume that main picture duration is the length of the reel */
529 offset_from_end += k->main_picture()->actual_duration();
532 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
534 /* Assume that main picture duration is the length of the reel */
535 int64_t const reel_duration = k->main_picture()->actual_duration();
537 /* See doc/design/trim_reels.svg */
538 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
539 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
541 DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
542 if (j->reference_video ()) {
543 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
546 if (j->reference_audio ()) {
547 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
550 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
551 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
554 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
555 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
556 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
560 offset_from_start += reel_duration;
561 offset_from_end -= reel_duration;
571 boost::mutex::scoped_lock lm (_mutex);
574 /* We can't pass in this state */
578 if (_playlist->length(_film) == DCPTime()) {
579 /* Special case of an empty Film; just give one black frame */
580 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
584 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
586 shared_ptr<Piece> earliest_content;
587 optional<DCPTime> earliest_time;
589 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
594 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
595 if (t > i->content->end(_film)) {
599 /* Given two choices at the same time, pick the one with texts so we see it before
602 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
604 earliest_content = i;
618 if (earliest_content) {
622 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
623 earliest_time = _black.position ();
627 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
628 earliest_time = _silent.position ();
635 earliest_content->done = earliest_content->decoder->pass ();
636 shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
637 if (dcp && !_play_referenced && dcp->reference_audio()) {
638 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
639 to `hide' the fact that no audio was emitted during the referenced DCP (though
640 we need to behave as though it was).
642 _last_audio_time = dcp->end (_film);
647 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
648 _black.set_position (_black.position() + one_video_frame());
652 DCPTimePeriod period (_silent.period_at_position());
653 if (_last_audio_time) {
654 /* Sometimes the thing that happened last finishes fractionally before
655 or after this silence. Bodge the start time of the silence to fix it.
657 DCPOMATIC_ASSERT (labs(period.from.get() - _last_audio_time->get()) < 2);
658 period.from = *_last_audio_time;
660 if (period.duration() > one_video_frame()) {
661 period.to = period.from + one_video_frame();
664 _silent.set_position (period.to);
672 /* Emit any audio that is ready */
674 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
675 of our streams, or the position of the _silent.
677 DCPTime pull_to = _film->length ();
678 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
679 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
680 pull_to = i->second.last_push_end;
683 if (!_silent.done() && _silent.position() < pull_to) {
684 pull_to = _silent.position();
687 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
688 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
689 if (_last_audio_time && i->second < *_last_audio_time) {
690 /* This new data comes before the last we emitted (or the last seek); discard it */
691 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
696 } else if (_last_audio_time && i->second > *_last_audio_time) {
697 /* There's a gap between this data and the last we emitted; fill with silence */
698 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
701 emit_audio (i->first, i->second);
706 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
707 do_emit_video(i->first, i->second);
714 /** @return Open subtitles for the frame at the given time, converted to images */
715 optional<PositionImage>
716 Player::open_subtitles_for_frame (DCPTime time) const
718 list<PositionImage> captions;
719 int const vfr = _film->video_frame_rate();
723 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
726 /* Bitmap subtitles */
727 BOOST_FOREACH (BitmapText i, j.bitmap) {
732 /* i.image will already have been scaled to fit _video_container_size */
733 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
739 lrint (_video_container_size.width * i.rectangle.x),
740 lrint (_video_container_size.height * i.rectangle.y)
746 /* String subtitles (rendered to an image) */
747 if (!j.string.empty ()) {
748 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
749 copy (s.begin(), s.end(), back_inserter (captions));
753 if (captions.empty ()) {
754 return optional<PositionImage> ();
757 return merge (captions);
761 Player::video (weak_ptr<Piece> wp, ContentVideo video)
763 shared_ptr<Piece> piece = wp.lock ();
768 FrameRateChange frc (_film, piece->content);
769 if (frc.skip && (video.frame % 2) == 1) {
773 /* Time of the first frame we will emit */
774 DCPTime const time = content_video_to_dcp (piece, video.frame);
776 /* Discard if it's before the content's period or the last accurate seek. We can't discard
777 if it's after the content's period here as in that case we still need to fill any gap between
778 `now' and the end of the content's period.
780 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
784 /* Fill gaps that we discover now that we have some video which needs to be emitted.
785 This is where we need to fill to.
787 DCPTime fill_to = min (time, piece->content->end(_film));
789 if (_last_video_time) {
790 DCPTime fill_from = max (*_last_video_time, piece->content->position());
792 /* Fill if we have more than half a frame to do */
793 if ((fill_to - fill_from) > one_video_frame() / 2) {
794 LastVideoMap::const_iterator last = _last_video.find (wp);
795 if (_film->three_d()) {
796 Eyes fill_to_eyes = video.eyes;
797 if (fill_to_eyes == EYES_BOTH) {
798 fill_to_eyes = EYES_LEFT;
800 if (fill_to == piece->content->end(_film)) {
801 /* Don't fill after the end of the content */
802 fill_to_eyes = EYES_LEFT;
804 DCPTime j = fill_from;
805 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
806 if (eyes == EYES_BOTH) {
809 while (j < fill_to || eyes != fill_to_eyes) {
810 if (last != _last_video.end()) {
811 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
812 copy->set_eyes (eyes);
813 emit_video (copy, j);
815 emit_video (black_player_video_frame(eyes), j);
817 if (eyes == EYES_RIGHT) {
818 j += one_video_frame();
820 eyes = increment_eyes (eyes);
823 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
824 if (last != _last_video.end()) {
825 emit_video (last->second, j);
827 emit_video (black_player_video_frame(EYES_BOTH), j);
834 _last_video[wp].reset (
837 piece->content->video->crop (),
838 piece->content->video->fade (_film, video.frame),
839 piece->content->video->scale().size (
840 piece->content->video, _video_container_size, _film->frame_size ()
842 _video_container_size,
845 piece->content->video->colour_conversion(),
846 piece->content->video->range(),
853 for (int i = 0; i < frc.repeat; ++i) {
854 if (t < piece->content->end(_film)) {
855 emit_video (_last_video[wp], t);
857 t += one_video_frame ();
862 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
864 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
866 shared_ptr<Piece> piece = wp.lock ();
871 shared_ptr<AudioContent> content = piece->content->audio;
872 DCPOMATIC_ASSERT (content);
874 int const rfr = content->resampled_frame_rate (_film);
876 /* Compute time in the DCP */
877 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
878 /* And the end of this block in the DCP */
879 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
881 /* Remove anything that comes before the start or after the end of the content */
882 if (time < piece->content->position()) {
883 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
885 /* This audio is entirely discarded */
888 content_audio.audio = cut.first;
890 } else if (time > piece->content->end(_film)) {
893 } else if (end > piece->content->end(_film)) {
894 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
895 if (remaining_frames == 0) {
898 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
899 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
900 content_audio.audio = cut;
903 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
907 if (content->gain() != 0) {
908 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
909 gain->apply_gain (content->gain ());
910 content_audio.audio = gain;
915 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
919 if (_audio_processor) {
920 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
925 _audio_merger.push (content_audio.audio, time);
926 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
927 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
931 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
933 shared_ptr<Piece> piece = wp.lock ();
934 shared_ptr<const TextContent> text = wc.lock ();
935 if (!piece || !text) {
939 /* Apply content's subtitle offsets */
940 subtitle.sub.rectangle.x += text->x_offset ();
941 subtitle.sub.rectangle.y += text->y_offset ();
943 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
944 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
945 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
947 /* Apply content's subtitle scale */
948 subtitle.sub.rectangle.width *= text->x_scale ();
949 subtitle.sub.rectangle.height *= text->y_scale ();
952 shared_ptr<Image> image = subtitle.sub.image;
953 /* We will scale the subtitle up to fit _video_container_size */
954 dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
955 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
956 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
958 _active_texts[text->type()].add_from (wc, ps, from);
962 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
964 shared_ptr<Piece> piece = wp.lock ();
965 shared_ptr<const TextContent> text = wc.lock ();
966 if (!piece || !text) {
971 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
973 if (from > piece->content->end(_film)) {
977 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
978 s.set_h_position (s.h_position() + text->x_offset ());
979 s.set_v_position (s.v_position() + text->y_offset ());
980 float const xs = text->x_scale();
981 float const ys = text->y_scale();
982 float size = s.size();
984 /* Adjust size to express the common part of the scaling;
985 e.g. if xs = ys = 0.5 we scale size by 2.
987 if (xs > 1e-5 && ys > 1e-5) {
988 size *= 1 / min (1 / xs, 1 / ys);
992 /* Then express aspect ratio changes */
993 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
994 s.set_aspect_adjust (xs / ys);
997 s.set_in (dcp::Time(from.seconds(), 1000));
998 ps.string.push_back (StringText (s, text->outline_width()));
999 ps.add_fonts (text->fonts ());
1002 _active_texts[text->type()].add_from (wc, ps, from);
1006 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1008 shared_ptr<const TextContent> text = wc.lock ();
1013 if (!_active_texts[text->type()].have(wc)) {
1017 shared_ptr<Piece> piece = wp.lock ();
1022 DCPTime const dcp_to = content_time_to_dcp (piece, to);
1024 if (dcp_to > piece->content->end(_film)) {
1028 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1030 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1031 if (text->use() && !always && !text->burn()) {
1032 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1037 Player::seek (DCPTime time, bool accurate)
1039 boost::mutex::scoped_lock lm (_mutex);
1042 /* We can't seek in this state */
1047 _shuffler->clear ();
1052 if (_audio_processor) {
1053 _audio_processor->flush ();
1056 _audio_merger.clear ();
1057 for (int i = 0; i < TEXT_COUNT; ++i) {
1058 _active_texts[i].clear ();
1061 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1062 if (time < i->content->position()) {
1063 /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
1064 we must seek this (following) content accurately, otherwise when we come to the end of the current
1065 content we may not start right at the beginning of the next, causing a gap (if the next content has
1066 been trimmed to a point between keyframes, or something).
1068 i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
1070 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1071 /* During; seek to position */
1072 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1075 /* After; this piece is done */
1081 _last_video_time = time;
1082 _last_video_eyes = EYES_LEFT;
1083 _last_audio_time = time;
1085 _last_video_time = optional<DCPTime>();
1086 _last_video_eyes = optional<Eyes>();
1087 _last_audio_time = optional<DCPTime>();
1090 _black.set_position (time);
1091 _silent.set_position (time);
1093 _last_video.clear ();
1097 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1099 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1100 player before the video that requires them.
1102 _delay.push_back (make_pair (pv, time));
1104 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1105 _last_video_time = time + one_video_frame();
1107 _last_video_eyes = increment_eyes (pv->eyes());
1109 if (_delay.size() < 3) {
1113 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1115 do_emit_video (to_do.first, to_do.second);
1119 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1121 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1122 for (int i = 0; i < TEXT_COUNT; ++i) {
1123 _active_texts[i].clear_before (time);
1127 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1129 pv->set_text (subtitles.get ());
1136 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1138 /* Log if the assert below is about to fail */
1139 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1140 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1143 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1144 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1145 Audio (data, time, _film->audio_frame_rate());
1146 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1150 Player::fill_audio (DCPTimePeriod period)
1152 if (period.from == period.to) {
1156 DCPOMATIC_ASSERT (period.from < period.to);
1158 DCPTime t = period.from;
1159 while (t < period.to) {
1160 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1161 Frame const samples = block.frames_round(_film->audio_frame_rate());
1163 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1164 silence->make_silent ();
1165 emit_audio (silence, t);
1172 Player::one_video_frame () const
1174 return DCPTime::from_frames (1, _film->video_frame_rate ());
1177 pair<shared_ptr<AudioBuffers>, DCPTime>
1178 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1180 DCPTime const discard_time = discard_to - time;
1181 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1182 Frame remaining_frames = audio->frames() - discard_frames;
1183 if (remaining_frames <= 0) {
1184 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1186 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1187 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1188 return make_pair(cut, time + discard_time);
1192 Player::set_dcp_decode_reduction (optional<int> reduction)
1194 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1197 boost::mutex::scoped_lock lm (_mutex);
1199 if (reduction == _dcp_decode_reduction) {
1201 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1205 _dcp_decode_reduction = reduction;
1206 setup_pieces_unlocked ();
1209 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1213 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1215 boost::mutex::scoped_lock lm (_mutex);
1217 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1218 if (i->content == content) {
1219 return content_time_to_dcp (i, t);
1223 /* We couldn't find this content; perhaps things are being changed over */
1224 return optional<DCPTime>();