2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
/* Property identifiers emitted through Player's Change signal so observers can
   tell which aspect of the player's output has (or is about to) change. */
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Construct a Player for the given film and playlist.
   Subscribes to change signals on both the film and the playlist so that the
   player can rebuild its state when content changes, then performs an initial
   accurate seek to time zero.
   NOTE(review): some initialiser-list and body lines appear to be missing from
   this extract; confirm against the full file. */
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 , _playlist (playlist)
91 , _ignore_video (false)
92 , _ignore_audio (false)
93 , _ignore_text (false)
94 , _always_burn_open_subtitles (false)
96 , _play_referenced (false)
97 , _audio_merger (_film->audio_frame_rate())
100 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
101 _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1));
102 _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
103 set_video_container_size (_film->frame_size ());
/* Behave as if the film's audio processor has just been set, so _audio_processor is created */
105 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
/* Accurate seek to the start so the player begins in a known state */
108 seek (DCPTime (), true);
/* Rebuild the list of Pieces (content + decoder pairs), taking the state mutex.
   Thin locking wrapper around setup_pieces_unlocked(). */
117 Player::setup_pieces ()
119 boost::mutex::scoped_lock lm (_mutex);
120 setup_pieces_unlocked ();
/* Rebuild _pieces from the playlist's content: create a decoder for each piece
   of content, wire decoder output signals to the player's handlers, and reset
   per-stream / last-emitted bookkeeping.  Caller must hold _mutex. */
124 Player::setup_pieces_unlocked ()
/* A fresh Shuffler re-orders 3D left/right frames that arrive out of sequence */
129 _shuffler = new Shuffler();
130 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
132 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files are missing */
134 if (!i->paths_valid ()) {
138 if (_ignore_video && _ignore_audio && i->text.empty()) {
139 /* We're only interested in text and this content has none */
143 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
144 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
147 /* Not something that we can decode; e.g. Atmos content */
/* Apply the player-wide ignore flags to the new decoder */
151 if (decoder->video && _ignore_video) {
152 decoder->video->set_ignore (true);
155 if (decoder->audio && _ignore_audio) {
156 decoder->audio->set_ignore (true);
160 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
161 i->set_ignore (true);
/* DCP content may reference assets rather than decoding them */
165 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
167 dcp->set_decode_referenced (_play_referenced);
168 if (_play_referenced) {
169 dcp->set_forced_reduction (_dcp_decode_reduction);
173 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
174 _pieces.push_back (piece);
/* Connect decoder outputs; weak_ptr<Piece> avoids keeping pieces alive via the signals */
176 if (decoder->video) {
177 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
178 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
179 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
181 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
185 if (decoder->audio) {
186 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
189 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
191 while (j != decoder->text.end()) {
192 (*j)->BitmapStart.connect (
193 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
195 (*j)->PlainStart.connect (
196 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
199 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1, _2)
/* Record each audio stream's owning piece and its starting push position */
206 _stream_states.clear ();
207 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
208 if (i->content->audio) {
209 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
210 _stream_states[j] = StreamState (i, i->content->position ());
/* Periods of the film with no video (fill with black) / no audio (fill with silence) */
215 _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
216 _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
218 _last_video_time = DCPTime ();
219 _last_video_eyes = EYES_BOTH;
220 _last_audio_time = DCPTime ();
/* Handle a change to some content in the playlist.
   On PENDING we take the mutex (blocking pass()/seek() until the change has
   gone through); on DONE we rebuild our pieces.  In all cases the change is
   re-emitted to our own observers. */
225 Player::playlist_content_change (ChangeType type, int property, bool frequent)
227 if (type == CHANGE_TYPE_PENDING) {
228 boost::mutex::scoped_lock lm (_mutex);
229 /* The player content is probably about to change, so we can't carry on
230 until that has happened and we've rebuilt our pieces. Stop pass()
231 and seek() from working until then.
234 } else if (type == CHANGE_TYPE_DONE) {
235 /* A change in our content has gone through. Re-build our pieces. */
/* Forward the change notification to our own listeners */
239 Change (type, property, frequent);
/* Set the size of the "container" into which output video frames are placed.
   Emits PENDING before the change, then either CANCELLED (if the size is
   unchanged) or DONE.  Rebuilds the cached black frame at the new size. */
243 Player::set_video_container_size (dcp::Size s)
245 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
248 boost::mutex::scoped_lock lm (_mutex);
/* No-op: signal cancellation rather than completion */
250 if (s == _video_container_size) {
252 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
256 _video_container_size = s;
/* Re-make the black filler image at the new container size */
258 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
259 _black_image->make_black ();
262 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Handle a change to the playlist itself (content added/removed etc.):
   rebuild pieces when the change is complete, then re-emit as PLAYLIST. */
266 Player::playlist_change (ChangeType type)
268 if (type == CHANGE_TYPE_DONE) {
271 Change (type, PlayerProperty::PLAYLIST, false);
/* Handle a change to a Film property that affects the player's output,
   translating it into the corresponding PlayerProperty notification and
   updating internal state (pieces, audio processor, audio merger). */
275 Player::film_change (ChangeType type, Film::Property p)
277 /* Here we should notice Film properties that affect our output, and
278 alert listeners that our output now would be different to how it was
279 last time we were run.
282 if (p == Film::CONTAINER) {
283 Change (type, PlayerProperty::FILM_CONTAINER, false);
284 } else if (p == Film::VIDEO_FRAME_RATE) {
285 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
286 so we need new pieces here.
288 if (type == CHANGE_TYPE_DONE) {
291 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
292 } else if (p == Film::AUDIO_PROCESSOR) {
/* Clone the film's audio processor for our own use at the film's audio rate */
293 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
294 boost::mutex::scoped_lock lm (_mutex);
295 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
297 } else if (p == Film::AUDIO_CHANNELS) {
/* Channel count changed: pending merged audio is no longer valid */
298 if (type == CHANGE_TYPE_DONE) {
299 boost::mutex::scoped_lock lm (_mutex);
300 _audio_merger.clear ();
/* Convert a list of bitmap subtitles into PositionImages scaled and positioned
   for _video_container_size.  Rectangle coordinates are proportions of the
   container, so they are multiplied by the container dimensions.
   NOTE(review): several lines of this function are missing from this extract
   (e.g. the scaling call and return); confirm details against the full file. */
306 Player::transform_bitmap_texts (list<BitmapText> subs) const
308 list<PositionImage> all;
310 for (list<BitmapText>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
315 /* We will scale the subtitle up to fit _video_container_size */
316 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
322 dcp::YUV_TO_RGB_REC601,
323 i->image->pixel_format (),
/* Position is also expressed as a proportion of the container */
328 lrint (_video_container_size.width * i->rectangle.x),
329 lrint (_video_container_size.height * i->rectangle.y)
/* @return A completely black frame at the current container size, for filling
   gaps in the timeline; `eyes' says which eye(s) the frame is for in 3D. */
338 shared_ptr<PlayerVideo>
339 Player::black_player_video_frame (Eyes eyes) const
341 return shared_ptr<PlayerVideo> (
/* Wrap the cached _black_image; no content or frame number is associated */
343 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
346 _video_container_size,
347 _video_container_size,
350 PresetColourConversion::all().front().conversion,
351 boost::weak_ptr<Content>(),
352 boost::optional<Frame>()
/* Convert a DCP time to a video frame index within the given piece's content,
   accounting for the piece's position, trim and frame-rate change. */
358 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
360 DCPTime s = t - piece->content->position ();
/* Clamp into the content's period, then add back the trimmed-off start */
361 s = min (piece->content->length_after_trim(), s);
362 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
364 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
365 then convert that ContentTime to frames at the content's rate. However this fails for
366 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
367 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
369 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
371 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video(): convert a content video frame index to
   the DCP time at which that frame is presented. */
375 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
377 /* See comment in dcp_to_content_video */
378 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
379 return d + piece->content->position();
/* Convert a DCP time to a frame index in the piece's resampled audio
   (i.e. at the film's audio frame rate), accounting for position and trim. */
383 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
385 DCPTime s = t - piece->content->position ();
386 s = min (piece->content->length_after_trim(), s);
387 /* See notes in dcp_to_content_video */
388 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio(): convert a resampled-audio frame index
   back to DCP time. */
392 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
394 /* See comment in dcp_to_content_video */
395 return DCPTime::from_frames (f, _film->audio_frame_rate())
396 - DCPTime (piece->content->trim_start(), piece->frc)
397 + piece->content->position();
/* Convert a DCP time to a ContentTime within the given piece, clamped to the
   content's period and offset by its start trim. */
401 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
403 DCPTime s = t - piece->content->position ();
404 s = min (piece->content->length_after_trim(), s);
405 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Convert a ContentTime within the given piece to DCP time (never negative). */
409 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
411 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by all text (subtitle/caption) content in the
   playlist.  @return Flat list of fonts; duplicates are not removed. */
414 list<shared_ptr<Font> >
415 Player::get_subtitle_fonts ()
417 boost::mutex::scoped_lock lm (_mutex);
419 list<shared_ptr<Font> > fonts;
420 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
421 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
422 /* XXX: things may go wrong if there are duplicate font IDs
423 with different font files.
425 list<shared_ptr<Font> > f = j->fonts ();
426 copy (f.begin(), f.end(), back_inserter (fonts));
433 /** Set this player never to produce any video data */
435 Player::set_ignore_video ()
437 boost::mutex::scoped_lock lm (_mutex);
438 _ignore_video = true;
/* Rebuild pieces so existing decoders pick up the ignore flag */
439 setup_pieces_unlocked ();
/** Set this player never to produce any audio data */
443 Player::set_ignore_audio ()
445 boost::mutex::scoped_lock lm (_mutex);
446 _ignore_audio = true;
/* Rebuild pieces so existing decoders pick up the ignore flag */
447 setup_pieces_unlocked ();
/** Set this player never to produce any text (subtitle/caption) data.
    NOTE(review): the line that sets the _ignore_text flag is not visible in
    this extract — confirm against the full file. */
451 Player::set_ignore_text ()
453 boost::mutex::scoped_lock lm (_mutex);
455 setup_pieces_unlocked ();
458 /** Set the player to always burn open texts into the image regardless of the content settings */
460 Player::set_always_burn_open_subtitles ()
462 boost::mutex::scoped_lock lm (_mutex);
463 _always_burn_open_subtitles = true;
466 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function signature and the line setting the fast flag are
   missing from this extract; from the comment and the rebuild below this is
   presumably Player::set_fast() — confirm against the full file. */
470 boost::mutex::scoped_lock lm (_mutex);
472 setup_pieces_unlocked ();
/** Set the player to decode and play material which is referenced by DCP
    content (rather than leaving it to be supplied by the referenced DCP). */
476 Player::set_play_referenced ()
478 boost::mutex::scoped_lock lm (_mutex);
479 _play_referenced = true;
/* Rebuild pieces so DCP decoders pick up the new referenced-playback setting */
480 setup_pieces_unlocked ();
/* Build the list of reel assets (picture/sound/subtitle/closed-caption) that
   are referenced from DCP content in the playlist, with their entry points and
   durations adjusted for the content's trims and their periods placed on the
   film timeline. */
483 list<ReferencedReelAsset>
484 Player::get_reel_assets ()
486 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
488 list<ReferencedReelAsset> a;
490 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can reference reel assets */
491 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
496 scoped_ptr<DCPDecoder> decoder;
498 decoder.reset (new DCPDecoder (j, _film->log(), false));
504 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
/* Trims are expressed in frames at the content's own (cfr) rate;
   the film (ffr) rate is used when placing assets on the timeline */
506 DCPOMATIC_ASSERT (j->video_frame_rate ());
507 double const cfr = j->video_frame_rate().get();
508 Frame const trim_start = j->trim_start().frames_round (cfr);
509 Frame const trim_end = j->trim_end().frames_round (cfr);
510 int const ffr = _film->video_frame_rate ();
512 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
513 if (j->reference_video ()) {
514 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
515 DCPOMATIC_ASSERT (ra);
/* Narrow the asset to the un-trimmed part of the content */
516 ra->set_entry_point (ra->entry_point() + trim_start);
517 ra->set_duration (ra->duration() - trim_start - trim_end);
519 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
523 if (j->reference_audio ()) {
524 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
525 DCPOMATIC_ASSERT (ra);
526 ra->set_entry_point (ra->entry_point() + trim_start);
527 ra->set_duration (ra->duration() - trim_start - trim_end);
529 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
533 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
534 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
535 DCPOMATIC_ASSERT (ra);
536 ra->set_entry_point (ra->entry_point() + trim_start);
537 ra->set_duration (ra->duration() - trim_start - trim_end);
539 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
543 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
544 shared_ptr<dcp::ReelAsset> ra = k->closed_caption ();
545 DCPOMATIC_ASSERT (ra);
546 ra->set_entry_point (ra->entry_point() + trim_start);
547 ra->set_duration (ra->duration() - trim_start - trim_end);
549 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
553 /* Assume that main picture duration is the length of the reel */
554 offset += k->main_picture()->duration ();
/* NOTE(review): the enclosing function's signature is missing from this
   extract; from the body this appears to be the player's main pump (presumably
   Player::pass()), which asks the furthest-behind decoder (or the black/silent
   filler) to emit some data, then pushes out any audio that is ready —
   confirm against the full file. */
564 boost::mutex::scoped_lock lm (_mutex);
567 /* We can't pass in this state */
571 if (_playlist->length() == DCPTime()) {
572 /* Special case of an empty Film; just give one black frame */
573 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
577 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
579 shared_ptr<Piece> earliest_content;
580 optional<DCPTime> earliest_time;
582 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Piece position on the film timeline, never before the trimmed-in point */
587 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
588 if (t > i->content->end()) {
592 /* Given two choices at the same time, pick the one with texts so we see it before
595 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
597 earliest_content = i;
611 if (earliest_content) {
/* Black/silent filler may be further behind than any content */
615 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
616 earliest_time = _black.position ();
620 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
621 earliest_time = _silent.position ();
/* Ask the chosen source (content decoder, black or silence) for more data */
627 earliest_content->done = earliest_content->decoder->pass ();
630 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
631 _black.set_position (_black.position() + one_video_frame());
635 DCPTimePeriod period (_silent.period_at_position());
636 if (_last_audio_time) {
637 /* Sometimes the thing that happened last finishes fractionally before
638 this silence. Bodge the start time of the silence to fix it. I'm
639 not sure if this is the right solution --- maybe the last thing should
640 be padded `forward' rather than this thing padding `back'.
642 period.from = min(period.from, *_last_audio_time);
/* Emit at most one video frame's worth of silence per pass */
644 if (period.duration() > one_video_frame()) {
645 period.to = period.from + one_video_frame();
648 _silent.set_position (period.to);
656 /* Emit any audio that is ready */
658 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
659 of our streams, or the position of the _silent.
661 DCPTime pull_to = _film->length ();
662 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
663 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
664 pull_to = i->second.last_push_end;
667 if (!_silent.done() && _silent.position() < pull_to) {
668 pull_to = _silent.position();
671 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
672 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
673 if (_last_audio_time && i->second < *_last_audio_time) {
674 /* This new data comes before the last we emitted (or the last seek); discard it */
675 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
680 } else if (_last_audio_time && i->second > *_last_audio_time) {
681 /* There's a gap between this data and the last we emitted; fill with silence */
682 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
685 emit_audio (i->first, i->second);
/* Flush any video frames held in the subtitle-delay buffer */
690 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
691 do_emit_video(i->first, i->second);
698 /** @return Open subtitles for the frame at the given time, converted to images */
699 optional<PositionImage>
700 Player::open_subtitles_for_frame (DCPTime time) const
702 list<PositionImage> captions;
703 int const vfr = _film->video_frame_rate();
/* Gather the open subtitles that should be burnt in for this one-frame period */
707 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
710 /* Bitmap subtitles */
711 list<PositionImage> c = transform_bitmap_texts (j.bitmap);
712 copy (c.begin(), c.end(), back_inserter (captions));
714 /* String subtitles (rendered to an image) */
715 if (!j.string.empty ()) {
716 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
717 copy (s.begin(), s.end(), back_inserter (captions));
/* No subtitles at this time */
721 if (captions.empty ()) {
722 return optional<PositionImage> ();
/* Merge all caption images into a single image/position pair */
725 return merge (captions);
/* Handle a video frame emitted by a piece's decoder: convert its content frame
   number to DCP time, discard out-of-range frames, fill any gap since the last
   emitted frame (with repeated or black frames, handling 3D eyes), then build
   a PlayerVideo and emit it (repeated as required by the frame-rate change). */
729 Player::video (weak_ptr<Piece> wp, ContentVideo video)
731 shared_ptr<Piece> piece = wp.lock ();
/* If the frame-rate change requires skipping, drop every other frame */
736 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
737 if (frc.skip && (video.frame % 2) == 1) {
741 /* Time of the first frame we will emit */
742 DCPTime const time = content_video_to_dcp (piece, video.frame);
744 /* Discard if it's before the content's period or the last accurate seek. We can't discard
745 if it's after the content's period here as in that case we still need to fill any gap between
746 `now' and the end of the content's period.
748 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
752 /* Fill gaps that we discover now that we have some video which needs to be emitted.
753 This is where we need to fill to.
755 DCPTime fill_to = min (time, piece->content->end());
757 if (_last_video_time) {
758 DCPTime fill_from = max (*_last_video_time, piece->content->position());
759 LastVideoMap::const_iterator last = _last_video.find (wp);
760 if (_film->three_d()) {
/* In 3D we must fill eye-by-eye, stopping at the correct eye */
761 Eyes fill_to_eyes = video.eyes;
762 if (fill_to_eyes == EYES_BOTH) {
763 fill_to_eyes = EYES_LEFT;
765 if (fill_to == piece->content->end()) {
766 /* Don't fill after the end of the content */
767 fill_to_eyes = EYES_LEFT;
769 DCPTime j = fill_from;
770 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
771 if (eyes == EYES_BOTH) {
774 while (j < fill_to || eyes != fill_to_eyes) {
775 if (last != _last_video.end()) {
/* Repeat the last frame from this piece for the missing eye/time */
776 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
777 copy->set_eyes (eyes);
778 emit_video (copy, j);
780 emit_video (black_player_video_frame(eyes), j);
/* Advance time only after the right eye; then flip to the other eye */
782 if (eyes == EYES_RIGHT) {
783 j += one_video_frame();
785 eyes = increment_eyes (eyes);
788 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
789 if (last != _last_video.end()) {
790 emit_video (last->second, j);
792 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this frame and remember it for future gap-filling */
798 _last_video[wp].reset (
801 piece->content->video->crop (),
802 piece->content->video->fade (video.frame),
803 piece->content->video->scale().size (
804 piece->content->video, _video_container_size, _film->frame_size ()
806 _video_container_size,
809 piece->content->video->colour_conversion(),
/* Emit the frame, repeated as required by the frame-rate change */
816 for (int i = 0; i < frc.repeat; ++i) {
817 if (t < piece->content->end()) {
818 emit_video (_last_video[wp], t);
820 t += one_video_frame ();
/* Handle an audio block emitted by a piece's decoder: place it on the DCP
   timeline, trim anything outside the content's period, apply gain, remap to
   the film's channel layout, run the audio processor, then push into the
   merger and record how far this stream has been pushed. */
825 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
827 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
829 shared_ptr<Piece> piece = wp.lock ();
834 shared_ptr<AudioContent> content = piece->content->audio;
835 DCPOMATIC_ASSERT (content);
837 /* Compute time in the DCP */
838 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
839 /* And the end of this block in the DCP */
840 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
842 /* Remove anything that comes before the start or after the end of the content */
843 if (time < piece->content->position()) {
844 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
846 /* This audio is entirely discarded */
849 content_audio.audio = cut.first;
851 } else if (time > piece->content->end()) {
/* Starts after the content has finished: drop it all */
854 } else if (end > piece->content->end()) {
/* Overlaps the end of the content: keep only the part inside */
855 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
856 if (remaining_frames == 0) {
859 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
860 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
861 content_audio.audio = cut;
864 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting, if any */
868 if (content->gain() != 0) {
869 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
870 gain->apply_gain (content->gain ());
871 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout */
876 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
880 if (_audio_processor) {
881 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push into the merger and note how far this stream now extends */
886 _audio_merger.push (content_audio.audio, time);
887 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
888 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handle the start of a bitmap subtitle from a text decoder: apply the
   content's offsets and scales to the subtitle rectangle, then register it as
   an active text starting at the corresponding DCP time. */
892 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
894 shared_ptr<Piece> piece = wp.lock ();
895 shared_ptr<const TextContent> text = wc.lock ();
896 if (!piece || !text) {
900 /* Apply content's subtitle offsets */
901 subtitle.sub.rectangle.x += text->x_offset ();
902 subtitle.sub.rectangle.y += text->y_offset ();
904 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
905 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
906 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
908 /* Apply content's subtitle scale */
909 subtitle.sub.rectangle.width *= text->x_scale ();
910 subtitle.sub.rectangle.height *= text->y_scale ();
913 ps.bitmap.push_back (subtitle.sub);
914 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
916 _active_texts[subtitle.type()].add_from (wc, ps, from);
/* Handle the start of a plain (string) subtitle from a text decoder: apply
   the content's positioning/scaling settings to each SubtitleString and
   register the result as an active text at the corresponding DCP time. */
920 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
922 shared_ptr<Piece> piece = wp.lock ();
923 shared_ptr<const TextContent> text = wc.lock ();
924 if (!piece || !text) {
929 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore subtitles that start after the content has finished */
931 if (from > piece->content->end()) {
935 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
936 s.set_h_position (s.h_position() + text->x_offset ());
937 s.set_v_position (s.v_position() + text->y_offset ());
938 float const xs = text->x_scale();
939 float const ys = text->y_scale();
940 float size = s.size();
942 /* Adjust size to express the common part of the scaling;
943 e.g. if xs = ys = 0.5 we scale size by 2.
945 if (xs > 1e-5 && ys > 1e-5) {
946 size *= 1 / min (1 / xs, 1 / ys);
950 /* Then express aspect ratio changes */
951 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
952 s.set_aspect_adjust (xs / ys);
/* Timecode uses an editable rate of 1000 units per second */
955 s.set_in (dcp::Time(from.seconds(), 1000));
956 ps.string.push_back (StringText (s, text->outline_width()));
957 ps.add_fonts (text->fonts ());
960 _active_texts[subtitle.type()].add_from (wc, ps, from);
/* Handle the end of a subtitle: close the matching active text and, if it is
   not being burnt into the image, emit it via the Text signal. */
964 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to, TextType type)
966 if (!_active_texts[type].have (wc)) {
970 shared_ptr<Piece> piece = wp.lock ();
971 shared_ptr<const TextContent> text = wc.lock ();
972 if (!piece || !text) {
976 DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ignore stops that fall after the content has finished */
978 if (dcp_to > piece->content->end()) {
982 pair<PlayerText, DCPTime> from = _active_texts[type].add_to (wc, dcp_to);
/* Burnt-in texts are composited onto video instead of being emitted here */
984 bool const always = (type == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
985 if (text->use() && !always && !text->burn()) {
986 Text (from.first, type, DCPTimePeriod (from.second, dcp_to));
991 Player::seek (DCPTime time, bool accurate)
993 boost::mutex::scoped_lock lm (_mutex);
996 /* We can't seek in this state */
1001 _shuffler->clear ();
1006 if (_audio_processor) {
1007 _audio_processor->flush ();
1010 _audio_merger.clear ();
1011 for (int i = 0; i < TEXT_COUNT; ++i) {
1012 _active_texts[i].clear ();
1015 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1016 if (time < i->content->position()) {
1017 /* Before; seek to the start of the content */
1018 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
1020 } else if (i->content->position() <= time && time < i->content->end()) {
1021 /* During; seek to position */
1022 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1025 /* After; this piece is done */
1031 _last_video_time = time;
1032 _last_video_eyes = EYES_LEFT;
1033 _last_audio_time = time;
1035 _last_video_time = optional<DCPTime>();
1036 _last_video_eyes = optional<Eyes>();
1037 _last_audio_time = optional<DCPTime>();
1040 _black.set_position (time);
1041 _silent.set_position (time);
1043 _last_video.clear ();
/* Queue a video frame for emission, updating the last-emitted time/eyes.
   Frames are held briefly in _delay so that subtitles for them can arrive
   before the frame is actually emitted by do_emit_video(). */
1047 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1049 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1050 player before the video that requires them.
1052 _delay.push_back (make_pair (pv, time));
/* Only advance the clock once both eyes (or a 2D frame) have been queued */
1054 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1055 _last_video_time = time + one_video_frame();
1057 _last_video_eyes = increment_eyes (pv->eyes());
/* Keep a couple of frames buffered before emitting the oldest */
1059 if (_delay.size() < 3) {
1063 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1065 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: expire finished active texts, attach any open
   subtitles that should be burnt into this frame, and signal the frame out. */
1069 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
/* Expire texts only once per time position (i.e. not between L and R eyes) */
1071 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1072 for (int i = 0; i < TEXT_COUNT; ++i) {
1073 _active_texts[i].clear_before (time);
1077 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1079 pv->set_text (subtitles.get ());
/* Emit a block of audio at `time', which must follow on exactly from the
   previously emitted audio; advances _last_audio_time past this block. */
1086 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1088 /* Log if the assert below is about to fail */
1089 if (_last_audio_time && time != *_last_audio_time) {
1090 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1093 /* This audio must follow on from the previous */
1094 DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1096 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence to cover the given period, in blocks of at most half a second
   to bound the size of each AudioBuffers allocation.  A zero-length period is
   a no-op. */
1100 Player::fill_audio (DCPTimePeriod period)
1102 if (period.from == period.to) {
1106 DCPOMATIC_ASSERT (period.from < period.to);
1108 DCPTime t = period.from;
1109 while (t < period.to) {
1110 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1111 Frame const samples = block.frames_round(_film->audio_frame_rate());
/* NOTE(review): the guard against samples == 0 and the loop increment are not
   visible in this extract — confirm against the full file */
1113 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1114 silence->make_silent ();
1115 emit_audio (silence, t);
/* @return The duration of one video frame at the film's video frame rate */
1122 Player::one_video_frame () const
1124 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Discard the part of `audio' (which starts at `time') that falls before
   `discard_to'.  @return The remaining audio and its new start time, or a
   null buffer if everything was discarded. */
1127 pair<shared_ptr<AudioBuffers>, DCPTime>
1128 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1130 DCPTime const discard_time = discard_to - time;
1131 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1132 Frame remaining_frames = audio->frames() - discard_frames;
1133 if (remaining_frames <= 0) {
1134 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
/* Copy the surviving tail into a fresh buffer */
1136 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1137 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1138 return make_pair(cut, time + discard_time);
/* Set (or clear, with an empty optional) the resolution reduction applied when
   decoding DCP content.  Emits PENDING, then CANCELLED if unchanged or DONE
   after rebuilding the pieces with the new reduction. */
1142 Player::set_dcp_decode_reduction (optional<int> reduction)
1144 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1147 boost::mutex::scoped_lock lm (_mutex);
1149 if (reduction == _dcp_decode_reduction) {
1151 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1155 _dcp_decode_reduction = reduction;
1156 setup_pieces_unlocked ();
1159 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Public overload: convert a ContentTime within the given Content to DCP time,
   or an empty optional if that content is not currently among our pieces
   (e.g. while the playlist is being changed). */
1163 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1165 boost::mutex::scoped_lock lm (_mutex);
1167 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1168 if (i->content == content) {
1169 return content_time_to_dcp (i, t);
1173 /* We couldn't find this content; perhaps things are being changed over */
1174 return optional<DCPTime>();