2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
/* Write a general-type entry to the Film's log.  NOTE(review): the expansion ends with a
   semicolon and is not do{}while(0)-wrapped, so it is only safe where a bare statement is
   valid (e.g. not as the sole body of an un-braced `if`). */
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
/* Identifiers passed with the Player's Change signal so observers can tell which
   aspect of the player's output has changed. */
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Construct a Player for @param film with content from @param playlist.
   Wires up change signals from the film and playlist, sizes the video container from the
   film's frame size, primes the audio processor state and does an initial accurate seek
   to time zero.
   NOTE(review): several initialisers and statements are elided from this extract (the
   line numbering jumps, e.g. before `, _playlist`) — confirm against the full source. */
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 , _playlist (playlist)
91 , _ignore_video (false)
92 , _ignore_audio (false)
93 , _ignore_text (false)
94 , _always_burn_open_subtitles (false)
96 , _play_referenced (false)
97 , _audio_merger (_film->audio_frame_rate())
100 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
101 _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1));
102 _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
103 set_video_container_size (_film->frame_size ());
/* Simulate a "done" AUDIO_PROCESSOR change so _audio_processor is set up from the film */
105 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
108 seek (DCPTime (), true);
/* Public entry point: take _mutex and rebuild the pieces. */
117 Player::setup_pieces ()
119 boost::mutex::scoped_lock lm (_mutex);
120 setup_pieces_unlocked ();
/* Rebuild _pieces — one Piece per playable item of playlist content — wiring each
   content's decoder signals through to this Player.  Caller must hold _mutex.
   NOTE(review): this extract elides a number of lines (early `continue`s after the guard
   conditions, closing braces, the _pieces reset) — read against the full source. */
124 Player::setup_pieces_unlocked ()
/* A Shuffler re-orders 3D left/right-eye video that arrives out of sequence */
129 _shuffler = new Shuffler();
130 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
132 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files are missing on disk */
134 if (!i->paths_valid ()) {
138 if (_ignore_video && _ignore_audio && i->text.empty()) {
139 /* We're only interested in text and this content has none */
143 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
144 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
147 /* Not something that we can decode; e.g. Atmos content */
/* Apply the player-wide ignore flags to the per-type sub-decoders */
151 if (decoder->video && _ignore_video) {
152 decoder->video->set_ignore (true);
155 if (decoder->audio && _ignore_audio) {
156 decoder->audio->set_ignore (true);
160 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
161 i->set_ignore (true);
/* DCP content may reference assets in another DCP rather than decoding them itself */
165 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
167 dcp->set_decode_referenced (_play_referenced);
168 if (_play_referenced) {
169 dcp->set_forced_reduction (_dcp_decode_reduction);
173 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
174 _pieces.push_back (piece);
/* Route decoder output into Player::video / Player::audio / the text handlers.
   Only weak_ptrs to the Piece are bound, so these connections do not keep it alive. */
176 if (decoder->video) {
177 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
178 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
179 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
181 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
185 if (decoder->audio) {
186 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
189 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
191 while (j != decoder->text.end()) {
192 (*j)->BitmapStart.connect (
193 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
195 (*j)->PlainStart.connect (
196 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
199 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1, _2)
/* Record, for each audio stream, the Piece that owns it and where it starts */
206 _stream_states.clear ();
207 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
208 if (i->content->audio) {
209 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
210 _stream_states[j] = StreamState (i, i->content->position ());
/* Timeline periods containing no video / no audio; pass() fills these with black / silence */
215 _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
216 _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
/* Reset emission bookkeeping to the start of the DCP */
218 _last_video_time = DCPTime ();
219 _last_video_eyes = EYES_BOTH;
220 _last_audio_time = DCPTime ();
/* Handle a change to some piece of content in the playlist.
   PENDING suspends the player (pass()/seek() must not run on stale pieces); DONE rebuilds
   the pieces; the change is then re-emitted to our own observers.
   NOTE(review): the suspend/resume statements themselves are elided from this extract. */
225 Player::playlist_content_change (ChangeType type, int property, bool frequent)
227 if (type == CHANGE_TYPE_PENDING) {
228 boost::mutex::scoped_lock lm (_mutex);
229 /* The player content is probably about to change, so we can't carry on
230 until that has happened and we've rebuilt our pieces. Stop pass()
231 and seek() from working until then.
234 } else if (type == CHANGE_TYPE_DONE) {
235 /* A change in our content has gone through. Re-build our pieces. */
/* Forward the change to anybody listening to this Player */
239 Change (type, property, frequent);
/* Set the size of the "container" into which output video is placed, rebuilding the
   cached black frame to match.  Emits PENDING, then either CANCELLED (no-op change)
   or DONE.  NOTE(review): the early-return after the CANCELLED emit is elided here. */
243 Player::set_video_container_size (dcp::Size s)
245 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
248 boost::mutex::scoped_lock lm (_mutex);
250 if (s == _video_container_size) {
252 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
256 _video_container_size = s;
/* Cache a black frame at the new container size for gap-filling */
258 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
259 _black_image->make_black ();
262 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Handle a change to the playlist as a whole; on DONE the pieces are rebuilt
   (the rebuild call is elided from this extract) and the change is re-emitted. */
266 Player::playlist_change (ChangeType type)
268 if (type == CHANGE_TYPE_DONE) {
271 Change (type, PlayerProperty::PLAYLIST, false);
/* Handle a change to a Film property, re-emitting it as the corresponding
   PlayerProperty where our output is affected and updating internal state
   (new pieces for a frame-rate change; a fresh processor clone; merger reset). */
275 Player::film_change (ChangeType type, Film::Property p)
277 /* Here we should notice Film properties that affect our output, and
278 alert listeners that our output now would be different to how it was
279 last time we were run.
282 if (p == Film::CONTAINER) {
283 Change (type, PlayerProperty::FILM_CONTAINER, false);
284 } else if (p == Film::VIDEO_FRAME_RATE) {
285 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
286 so we need new pieces here.
288 if (type == CHANGE_TYPE_DONE) {
291 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
292 } else if (p == Film::AUDIO_PROCESSOR) {
293 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
294 boost::mutex::scoped_lock lm (_mutex);
/* Clone the film's processor at our audio rate so runs are independent */
295 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
297 } else if (p == Film::AUDIO_CHANNELS) {
298 if (type == CHANGE_TYPE_DONE) {
299 boost::mutex::scoped_lock lm (_mutex);
300 _audio_merger.clear ();
/* Convert a list of bitmap subtitles into PositionImages scaled and positioned for
   _video_container_size.  Rectangle coordinates are fractions of the container, so
   width/height/x/y are multiplied up to pixels here.
   NOTE(review): the scale/conversion call these arguments belong to, the push_back
   and the return are elided from this extract. */
306 Player::transform_bitmap_texts (list<BitmapText> subs) const
308 list<PositionImage> all;
310 for (list<BitmapText>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
315 /* We will scale the subtitle up to fit _video_container_size */
316 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
322 dcp::YUV_TO_RGB_REC601,
323 i->image->pixel_format (),
328 lrint (_video_container_size.width * i->rectangle.x),
329 lrint (_video_container_size.height * i->rectangle.y)
/* @return a PlayerVideo wrapping the cached _black_image, used to fill periods with
   no video.  The eyes parameter allows correct filling in 3D.
   NOTE(review): several constructor arguments (crop, eyes, part etc.) are elided
   from this extract. */
338 shared_ptr<PlayerVideo>
339 Player::black_player_video_frame (Eyes eyes) const
341 return shared_ptr<PlayerVideo> (
343 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
346 _video_container_size,
347 _video_container_size,
350 PresetColourConversion::all().front().conversion,
351 boost::weak_ptr<Content>(),
352 boost::optional<Frame>()
358 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
360 DCPTime s = t - piece->content->position ();
361 s = min (piece->content->length_after_trim(), s);
362 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
364 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
365 then convert that ContentTime to frames at the content's rate. However this fails for
366 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
367 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
369 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
371 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
375 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
377 /* See comment in dcp_to_content_video */
378 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
379 return d + piece->content->position();
383 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
385 DCPTime s = t - piece->content->position ();
386 s = min (piece->content->length_after_trim(), s);
387 /* See notes in dcp_to_content_video */
388 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
392 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
394 /* See comment in dcp_to_content_video */
395 return DCPTime::from_frames (f, _film->audio_frame_rate())
396 - DCPTime (piece->content->trim_start(), piece->frc)
397 + piece->content->position();
401 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
403 DCPTime s = t - piece->content->position ();
404 s = min (piece->content->length_after_trim(), s);
405 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
409 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
411 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts of every TextContent in every piece into one list.
   NOTE(review): the `return fonts;` at the end is elided from this extract. */
414 list<shared_ptr<Font> >
415 Player::get_subtitle_fonts ()
417 boost::mutex::scoped_lock lm (_mutex);
419 list<shared_ptr<Font> > fonts;
420 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
421 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
422 /* XXX: things may go wrong if there are duplicate font IDs
423 with different font files.
425 list<shared_ptr<Font> > f = j->fonts ();
426 copy (f.begin(), f.end(), back_inserter (fonts));
433 /** Set this player never to produce any video data */
435 Player::set_ignore_video ()
437 boost::mutex::scoped_lock lm (_mutex);
438 _ignore_video = true;
439 setup_pieces_unlocked ();
443 Player::set_ignore_audio ()
445 boost::mutex::scoped_lock lm (_mutex);
446 _ignore_audio = true;
447 setup_pieces_unlocked ();
/* Set this player never to produce any text data, then rebuild the pieces.
   NOTE(review): the `_ignore_text = true;` assignment is elided from this extract. */
451 Player::set_ignore_text ()
453 boost::mutex::scoped_lock lm (_mutex);
455 setup_pieces_unlocked ();
458 /** Set the player to always burn open texts into the image regardless of the content settings */
460 Player::set_always_burn_open_subtitles ()
462 boost::mutex::scoped_lock lm (_mutex);
463 _always_burn_open_subtitles = true;
466 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function declaration and the flag assignment (presumably
   `_fast = true;`, judging by the comment above and the `_fast` argument passed to
   decoder_factory in setup_pieces_unlocked) are elided from this extract — confirm
   against the full source. */
470 boost::mutex::scoped_lock lm (_mutex);
472 setup_pieces_unlocked ();
476 Player::set_play_referenced ()
478 boost::mutex::scoped_lock lm (_mutex);
479 _play_referenced = true;
480 setup_pieces_unlocked ();
/* Build the list of reel assets (picture/sound/subtitle/closed-caption) that this
   player's DCP content refers to rather than re-encoding.  Trims are applied to each
   asset's entry point and duration, and each asset is paired with the DCPTimePeriod
   it occupies in the output.
   NOTE(review): several lines are elided from this extract — e.g. the `continue` for
   non-DCP content, the DCPDecoder construction's error handling, the a.push_back(...)
   wrappers around the ReferencedReelAsset constructions, and the final `return a;`. */
483 list<ReferencedReelAsset>
484 Player::get_reel_assets ()
486 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
488 list<ReferencedReelAsset> a;
490 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
491 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
/* A decoder is needed to enumerate the DCP's reels */
496 scoped_ptr<DCPDecoder> decoder;
498 decoder.reset (new DCPDecoder (j, _film->log(), false));
504 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
/* Trims are expressed in frames at the content rate (cfr); periods at the film rate (ffr) */
506 DCPOMATIC_ASSERT (j->video_frame_rate ());
507 double const cfr = j->video_frame_rate().get();
508 Frame const trim_start = j->trim_start().frames_round (cfr);
509 Frame const trim_end = j->trim_end().frames_round (cfr);
510 int const ffr = _film->video_frame_rate ();
512 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
513 if (j->reference_video ()) {
514 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
515 DCPOMATIC_ASSERT (ra);
516 ra->set_entry_point (ra->entry_point() + trim_start);
517 ra->set_duration (ra->duration() - trim_start - trim_end);
519 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
523 if (j->reference_audio ()) {
524 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
525 DCPOMATIC_ASSERT (ra);
526 ra->set_entry_point (ra->entry_point() + trim_start);
527 ra->set_duration (ra->duration() - trim_start - trim_end);
529 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
533 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
534 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
535 DCPOMATIC_ASSERT (ra);
536 ra->set_entry_point (ra->entry_point() + trim_start);
537 ra->set_duration (ra->duration() - trim_start - trim_end);
539 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
543 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
544 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
545 DCPOMATIC_ASSERT (l);
546 l->set_entry_point (l->entry_point() + trim_start);
547 l->set_duration (l->duration() - trim_start - trim_end);
549 ReferencedReelAsset (l, DCPTimePeriod (from, from + DCPTime::from_frames (l->duration(), ffr)))
554 /* Assume that main picture duration is the length of the reel */
555 offset += k->main_picture()->duration ();
/* One step of the player's main loop (this is Player::pass — its declaration is elided
   from this extract).  Finds whichever source is farthest behind — a content decoder,
   the black filler or the silence filler — makes it emit some data, then flushes any
   audio that is known to be complete and any delayed video that is now due.
   NOTE(review): many lines are elided here (the suspended-state early return, the `done`
   flag handling, the switch over which source to advance, the final return). */
565 boost::mutex::scoped_lock lm (_mutex);
568 /* We can't pass in this state */
572 if (_playlist->length() == DCPTime()) {
573 /* Special case of an empty Film; just give one black frame */
574 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
578 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
580 shared_ptr<Piece> earliest_content;
581 optional<DCPTime> earliest_time;
583 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* A decoder's position is in content time; convert to DCP time, never before the trim */
588 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
589 if (t > i->content->end()) {
593 /* Given two choices at the same time, pick the one with texts so we see it before
596 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
598 earliest_content = i;
612 if (earliest_content) {
/* Black or silence may be even further behind than the earliest content */
616 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
617 earliest_time = _black.position ();
621 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
622 earliest_time = _silent.position ();
628 earliest_content->done = earliest_content->decoder->pass ();
631 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
632 _black.set_position (_black.position() + one_video_frame());
636 DCPTimePeriod period (_silent.period_at_position());
637 if (_last_audio_time) {
638 /* Sometimes the thing that happened last finishes fractionally before
639 this silence. Bodge the start time of the silence to fix it. I'm
640 not sure if this is the right solution --- maybe the last thing should
641 be padded `forward' rather than this thing padding `back'.
643 period.from = min(period.from, *_last_audio_time);
/* Emit at most one video frame's worth of silence per pass */
645 if (period.duration() > one_video_frame()) {
646 period.to = period.from + one_video_frame();
649 _silent.set_position (period.to);
657 /* Emit any audio that is ready */
659 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
660 of our streams, or the position of the _silent.
662 DCPTime pull_to = _film->length ();
663 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
664 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
665 pull_to = i->second.last_push_end;
668 if (!_silent.done() && _silent.position() < pull_to) {
669 pull_to = _silent.position();
672 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
673 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
674 if (_last_audio_time && i->second < *_last_audio_time) {
675 /* This new data comes before the last we emitted (or the last seek); discard it */
676 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
681 } else if (_last_audio_time && i->second > *_last_audio_time) {
682 /* There's a gap between this data and the last we emitted; fill with silence */
683 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
686 emit_audio (i->first, i->second);
/* Flush any delayed video that is now due (see emit_video for why video is delayed) */
691 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
692 do_emit_video(i->first, i->second);
699 /** @return Open subtitles for the frame at the given time, converted to images */
700 optional<PositionImage>
701 Player::open_subtitles_for_frame (DCPTime time) const
703 list<PositionImage> captions;
704 int const vfr = _film->video_frame_rate();
/* Gather the active open subtitles overlapping this one-frame period; the loop header
   itself is elided from this extract */
708 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
711 /* Bitmap subtitles */
712 list<PositionImage> c = transform_bitmap_texts (j.bitmap);
713 copy (c.begin(), c.end(), back_inserter (captions));
715 /* String subtitles (rendered to an image) */
716 if (!j.string.empty ()) {
717 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
718 copy (s.begin(), s.end(), back_inserter (captions));
/* No subtitles at this time */
722 if (captions.empty ()) {
723 return optional<PositionImage> ();
/* Flatten all the captions into one image */
726 return merge (captions);
/* Handle a ContentVideo arriving from a decoder: work out its DCP time, discard frames
   before the piece or before the last accurate seek, fill any gap since the last emitted
   frame (duplicating the previous frame or using black, eye-by-eye in 3D), then build a
   PlayerVideo and emit it, repeating as required by the frame-rate change.
   NOTE(review): several lines are elided from this extract (early returns after the
   guards, else branches, the PlayerVideo construction's remaining arguments, the
   declaration of `t`). */
730 Player::video (weak_ptr<Piece> wp, ContentVideo video)
732 shared_ptr<Piece> piece = wp.lock ();
/* With a skip frame-rate change we drop every other content frame */
737 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
738 if (frc.skip && (video.frame % 2) == 1) {
742 /* Time of the first frame we will emit */
743 DCPTime const time = content_video_to_dcp (piece, video.frame);
745 /* Discard if it's before the content's period or the last accurate seek. We can't discard
746 if it's after the content's period here as in that case we still need to fill any gap between
747 `now' and the end of the content's period.
749 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
753 /* Fill gaps that we discover now that we have some video which needs to be emitted.
754 This is where we need to fill to.
756 DCPTime fill_to = min (time, piece->content->end());
758 if (_last_video_time) {
759 DCPTime fill_from = max (*_last_video_time, piece->content->position());
760 LastVideoMap::const_iterator last = _last_video.find (wp);
761 if (_film->three_d()) {
/* In 3D, fill eye-by-eye up to (but not including) the incoming frame's eye */
762 Eyes fill_to_eyes = video.eyes;
763 if (fill_to_eyes == EYES_BOTH) {
764 fill_to_eyes = EYES_LEFT;
766 if (fill_to == piece->content->end()) {
767 /* Don't fill after the end of the content */
768 fill_to_eyes = EYES_LEFT;
770 DCPTime j = fill_from;
771 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
772 if (eyes == EYES_BOTH) {
775 while (j < fill_to || eyes != fill_to_eyes) {
/* Repeat the last frame we saw for this piece if we have one, else black */
776 if (last != _last_video.end()) {
777 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
778 copy->set_eyes (eyes);
779 emit_video (copy, j);
781 emit_video (black_player_video_frame(eyes), j);
/* Only advance time once both eyes of a frame have been emitted */
783 if (eyes == EYES_RIGHT) {
784 j += one_video_frame();
786 eyes = increment_eyes (eyes);
/* 2D: fill one whole frame at a time */
789 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
790 if (last != _last_video.end()) {
791 emit_video (last->second, j);
793 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this frame and remember it for future gap-filling */
799 _last_video[wp].reset (
802 piece->content->video->crop (),
803 piece->content->video->fade (video.frame),
804 piece->content->video->scale().size (
805 piece->content->video, _video_container_size, _film->frame_size ()
807 _video_container_size,
810 piece->content->video->colour_conversion(),
/* Emit the frame, repeated as the frame-rate change requires, but not past the content's end */
817 for (int i = 0; i < frc.repeat; ++i) {
818 if (t < piece->content->end()) {
819 emit_video (_last_video[wp], t);
821 t += one_video_frame ();
/* Handle a block of ContentAudio from a decoder for the given stream: place it in DCP
   time, trim anything outside the content's period, apply gain, remap to the film's
   channel layout, run the audio processor if any, then push into the merger and record
   how far this stream has got.
   NOTE(review): early returns and else branches are elided from this extract, as is the
   line updating `time` after a front discard. */
826 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
828 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
830 shared_ptr<Piece> piece = wp.lock ();
835 shared_ptr<AudioContent> content = piece->content->audio;
836 DCPOMATIC_ASSERT (content);
838 /* Compute time in the DCP */
839 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
840 /* And the end of this block in the DCP */
841 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
843 /* Remove anything that comes before the start or after the end of the content */
844 if (time < piece->content->position()) {
845 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
847 /* This audio is entirely discarded */
850 content_audio.audio = cut.first;
852 } else if (time > piece->content->end()) {
/* Whole block is after the content's end */
855 } else if (end > piece->content->end()) {
/* Block straddles the content's end; keep only the part inside */
856 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
857 if (remaining_frames == 0) {
860 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
861 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
862 content_audio.audio = cut;
865 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting, if any */
869 if (content->gain() != 0) {
870 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
871 gain->apply_gain (content->gain ());
872 content_audio.audio = gain;
/* Remap from the stream's channel layout to the film's */
877 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
881 if (_audio_processor) {
882 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push into the merger and note how far this stream has now been pushed */
887 _audio_merger.push (content_audio.audio, time);
888 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
889 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handle the start of a bitmap text (subtitle) from a decoder: apply the content's
   offset and scale settings to the subtitle's rectangle, then record it as active
   from its DCP time.
   NOTE(review): the early return for dead weak_ptrs and the `PlayerText ps;`
   declaration are elided from this extract. */
893 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
895 shared_ptr<Piece> piece = wp.lock ();
896 shared_ptr<const TextContent> text = wc.lock ();
897 if (!piece || !text) {
901 /* Apply content's subtitle offsets */
902 subtitle.sub.rectangle.x += text->x_offset ();
903 subtitle.sub.rectangle.y += text->y_offset ();
905 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
906 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
907 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
909 /* Apply content's subtitle scale */
910 subtitle.sub.rectangle.width *= text->x_scale ();
911 subtitle.sub.rectangle.height *= text->y_scale ();
914 ps.bitmap.push_back (subtitle.sub);
915 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
917 _active_texts[subtitle.type()].add_from (wc, ps, from);
/* Handle the start of a plain (string) text from a decoder: apply the content's
   offset/scale/outline settings to each SubtitleString, then record the result as
   active from its DCP time.
   NOTE(review): early returns and the `PlayerText ps;` declaration are elided from
   this extract. */
921 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
923 shared_ptr<Piece> piece = wp.lock ();
924 shared_ptr<const TextContent> text = wc.lock ();
925 if (!piece || !text) {
930 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore anything that starts after the content's end */
932 if (from > piece->content->end()) {
936 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
937 s.set_h_position (s.h_position() + text->x_offset ());
938 s.set_v_position (s.v_position() + text->y_offset ());
939 float const xs = text->x_scale();
940 float const ys = text->y_scale();
941 float size = s.size();
943 /* Adjust size to express the common part of the scaling;
944 e.g. if xs = ys = 0.5 we scale size by 2.
946 if (xs > 1e-5 && ys > 1e-5) {
947 size *= 1 / min (1 / xs, 1 / ys);
951 /* Then express aspect ratio changes */
952 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
953 s.set_aspect_adjust (xs / ys);
/* in-time with millisecond precision; out-time is set when the stop arrives */
956 s.set_in (dcp::Time(from.seconds(), 1000));
957 ps.string.push_back (StringText (s, text->outline_width()));
958 ps.add_fonts (text->fonts ());
961 _active_texts[subtitle.type()].add_from (wc, ps, from);
/* Handle the end of a text from a decoder: close the active text for this content and,
   if it is not going to be burnt into the image, emit it via the Text signal.
   NOTE(review): the early returns after the guards are elided from this extract. */
965 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to, TextType type)
967 if (!_active_texts[type].have (wc)) {
971 shared_ptr<Piece> piece = wp.lock ();
972 shared_ptr<const TextContent> text = wc.lock ();
973 if (!piece || !text) {
977 DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ignore stops after the content's end */
979 if (dcp_to > piece->content->end()) {
983 pair<PlayerText, DCPTime> from = _active_texts[type].add_to (wc, dcp_to);
/* Burnt texts are composited onto the video in do_emit_video instead of being emitted */
985 bool const always = (type == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
986 if (text->use() && !always && !text->burn()) {
987 Text (from.first, type, DCPTimePeriod (from.second, dcp_to));
/* Seek the player to @param time.  @param accurate is true to get frames exactly at
   the requested time (decoders may need to decode-and-discard to get there), false to
   accept the nearest earlier decodable point.  Resets the shuffler, audio processor,
   merger and active texts, seeks every piece and re-primes the emission bookkeeping.
   NOTE(review): several lines are elided from this extract — the suspended-state early
   return, the `done` flags set on each piece, the `if (accurate)`/else around the
   _last_* assignments, and various closing braces. */
992 Player::seek (DCPTime time, bool accurate)
994 boost::mutex::scoped_lock lm (_mutex);
997 /* We can't seek in this state */
/* Discard any partly-assembled 3D frames */
1002 _shuffler->clear ();
1007 if (_audio_processor) {
1008 _audio_processor->flush ();
1011 _audio_merger.clear ();
1012 for (int i = 0; i < TEXT_COUNT; ++i) {
1013 _active_texts[i].clear ();
1016 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1017 if (time < i->content->position()) {
1018 /* Before; seek to the start of the content */
1019 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
1021 } else if (i->content->position() <= time && time < i->content->end()) {
1022 /* During; seek to position */
1023 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1026 /* After; this piece is done */
/* With an accurate seek we know exactly where the next emissions must start */
1032 _last_video_time = time;
1033 _last_video_eyes = EYES_LEFT;
1034 _last_audio_time = time;
1036 _last_video_time = optional<DCPTime>();
1037 _last_video_eyes = optional<Eyes>();
1038 _last_audio_time = optional<DCPTime>();
1041 _black.set_position (time);
1042 _silent.set_position (time);
1044 _last_video.clear ();
/* Queue a video frame for emission, updating the last-video bookkeeping.  Frames are
   held in a small _delay queue (released once it holds 3) so that subtitles have time
   to arrive before the video that needs them.
   NOTE(review): the pop_front of _delay and some closing braces are elided from this
   extract. */
1048 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1050 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1051 player before the video that requires them.
1053 _delay.push_back (make_pair (pv, time));
/* Advance the expected time only when a frame is complete (both eyes, or 2D) */
1055 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1056 _last_video_time = time + one_video_frame();
1058 _last_video_eyes = increment_eyes (pv->eyes());
1060 if (_delay.size() < 3) {
1064 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1066 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: expire active texts that are now in the past, attach any
   open subtitles that should be burnt into this frame, then emit.
   NOTE(review): the guard around set_text and the Video signal emission itself are
   elided from this extract. */
1070 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
/* Only expire texts once per complete frame (both eyes, or 2D) */
1072 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1073 for (int i = 0; i < TEXT_COUNT; ++i) {
1074 _active_texts[i].clear_before (time);
1078 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1080 pv->set_text (subtitles.get ());
/* Emit a block of audio at @param time, asserting that it follows on exactly from the
   previous block, and advance _last_audio_time past it.
   NOTE(review): the Audio signal emission itself appears to be elided from this
   extract — confirm against the full source. */
1087 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1089 /* Log if the assert below is about to fail */
1090 if (_last_audio_time && time != *_last_audio_time) {
1091 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1094 /* This audio must follow on from the previous */
1095 DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1097 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence to cover @param period, in blocks of at most 0.5s.
   NOTE(review): the early return for an empty period, the guard around the emission
   and the loop's `t` advance are elided from this extract. */
1101 Player::fill_audio (DCPTimePeriod period)
1103 if (period.from == period.to) {
1107 DCPOMATIC_ASSERT (period.from < period.to);
1109 DCPTime t = period.from;
1110 while (t < period.to) {
1111 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1112 Frame const samples = block.frames_round(_film->audio_frame_rate());
1114 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1115 silence->make_silent ();
1116 emit_audio (silence, t);
1123 Player::one_video_frame () const
1125 return DCPTime::from_frames (1, _film->video_frame_rate ());
1128 pair<shared_ptr<AudioBuffers>, DCPTime>
1129 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1131 DCPTime const discard_time = discard_to - time;
1132 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1133 Frame remaining_frames = audio->frames() - discard_frames;
1134 if (remaining_frames <= 0) {
1135 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1137 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1138 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1139 return make_pair(cut, time + discard_time);
/* Set the JPEG2000 decode-resolution reduction applied to DCP content, rebuilding the
   pieces so decoders pick it up.  Emits PENDING then CANCELLED (no-op) or DONE.
   NOTE(review): the early return after the CANCELLED emit is elided from this extract. */
1143 Player::set_dcp_decode_reduction (optional<int> reduction)
1145 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1148 boost::mutex::scoped_lock lm (_mutex);
1150 if (reduction == _dcp_decode_reduction) {
1152 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1156 _dcp_decode_reduction = reduction;
1157 setup_pieces_unlocked ();
1160 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1164 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1166 boost::mutex::scoped_lock lm (_mutex);
1168 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1169 if (i->content == content) {
1170 return content_time_to_dcp (i, t);
1174 /* We couldn't find this content; perhaps things are being changed over */
1175 return optional<DCPTime>();