2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
/* Property identifiers passed via the Changed signal so listeners can tell
   which aspect of the player's output has changed. */
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Construct a Player for @param film playing @param playlist.
   Wires up change signals from the film and playlist, sizes the video
   container to the film's frame size, primes the audio processor and
   seeks to the start.
   NOTE(review): some initialiser-list lines and the closing brace are not
   visible in this chunk — comments only, no code changed. */
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 , _playlist (playlist)
90 , _have_valid_pieces (false)
91 , _ignore_video (false)
92 , _ignore_audio (false)
93 , _ignore_text (false)
94 , _always_burn_open_subtitles (false)
96 , _play_referenced (false)
97 , _audio_merger (_film->audio_frame_rate())
100 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
101 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
102 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
103 set_video_container_size (_film->frame_size ());
/* Clone the film's audio processor (if any) before the initial seek */
105 film_changed (Film::AUDIO_PROCESSOR);
/* Accurate seek to time zero so decoders start in a known state */
107 seek (DCPTime (), true);
/* (Re)build _pieces from the playlist: create a decoder for each content
   item, connect decoder output signals back into the Player, set up the
   audio stream state map and the black/silent fillers, and reset playback
   position state.  Called whenever _have_valid_pieces is false. */
116 Player::setup_pieces ()
/* Shuffler re-orders 3D left/right-eye frames that arrive out of sequence */
121 _shuffler = new Shuffler();
122 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
124 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files are missing */
126 if (!i->paths_valid ()) {
130 if (_ignore_video && _ignore_audio && i->text.empty()) {
131 /* We're only interested in text and this content has none */
135 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
136 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
139 /* Not something that we can decode; e.g. Atmos content */
/* Propagate the ignore flags down to the individual sub-decoders */
143 if (decoder->video && _ignore_video) {
144 decoder->video->set_ignore (true);
147 if (decoder->audio && _ignore_audio) {
148 decoder->audio->set_ignore (true);
152 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
153 i->set_ignore (true);
/* DCP content may be "referenced" (passed through untouched into the output) */
157 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
159 dcp->set_decode_referenced (_play_referenced);
160 if (_play_referenced) {
161 dcp->set_forced_reduction (_dcp_decode_reduction);
165 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
166 _pieces.push_back (piece);
168 if (decoder->video) {
169 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
170 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
171 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
173 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
177 if (decoder->audio) {
178 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Hook up each text decoder's start/stop signals */
181 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
183 while (j != decoder->text.end()) {
184 (*j)->BitmapStart.connect (
185 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
187 (*j)->PlainStart.connect (
188 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
191 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1, _2)
/* Record, per audio stream, the piece it belongs to and its start position */
198 _stream_states.clear ();
199 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
200 if (i->content->audio) {
201 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
202 _stream_states[j] = StreamState (i, i->content->position ());
/* Emptiness maps of the timeline: where black video / silence must be emitted */
207 _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
208 _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
210 _last_video_time = DCPTime ();
211 _last_video_eyes = EYES_BOTH;
212 _last_audio_time = DCPTime ();
213 _have_valid_pieces = true;
/* Handle a change to a property of some content in the playlist.
   Properties in the first group invalidate our Pieces (decoders must be
   rebuilt); those in the second group only require listeners to be told
   that output has changed.  @param frequent is forwarded to Changed so
   listeners can throttle expensive reactions. */
217 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
219 shared_ptr<Content> c = w.lock ();
/* These properties affect decoding, timing or asset selection, so the
   pieces must be rebuilt before the next pass */
225 property == ContentProperty::POSITION ||
226 property == ContentProperty::LENGTH ||
227 property == ContentProperty::TRIM_START ||
228 property == ContentProperty::TRIM_END ||
229 property == ContentProperty::PATH ||
230 property == VideoContentProperty::FRAME_TYPE ||
231 property == VideoContentProperty::COLOUR_CONVERSION ||
232 property == AudioContentProperty::STREAMS ||
233 property == DCPContentProperty::NEEDS_ASSETS ||
234 property == DCPContentProperty::NEEDS_KDM ||
235 property == TextContentProperty::COLOUR ||
236 property == TextContentProperty::EFFECT ||
237 property == TextContentProperty::EFFECT_COLOUR ||
238 property == FFmpegContentProperty::SUBTITLE_STREAM ||
239 property == FFmpegContentProperty::FILTERS
242 _have_valid_pieces = false;
243 Changed (property, frequent);
/* These only change how existing decoded data is presented; no piece
   rebuild is needed */
246 property == TextContentProperty::LINE_SPACING ||
247 property == TextContentProperty::OUTLINE_WIDTH ||
248 property == TextContentProperty::Y_SCALE ||
249 property == TextContentProperty::FADE_IN ||
250 property == TextContentProperty::FADE_OUT ||
251 property == ContentProperty::VIDEO_FRAME_RATE ||
252 property == TextContentProperty::USE ||
253 property == TextContentProperty::X_OFFSET ||
254 property == TextContentProperty::Y_OFFSET ||
255 property == TextContentProperty::X_SCALE ||
256 property == TextContentProperty::FONTS ||
257 property == TextContentProperty::TYPE ||
258 property == VideoContentProperty::CROP ||
259 property == VideoContentProperty::SCALE ||
260 property == VideoContentProperty::FADE_IN ||
261 property == VideoContentProperty::FADE_OUT
264 Changed (property, frequent);
/* Set the size of the container into which output video will be scaled,
   rebuilding the cached black frame to match.  No-op if the size is
   unchanged.  Emits PlayerProperty::VIDEO_CONTAINER_SIZE on change. */
269 Player::set_video_container_size (dcp::Size s)
272 boost::mutex::scoped_lock lm (_mutex);
274 if (s == _video_container_size) {
278 _video_container_size = s;
/* Re-create the black filler image at the new size */
280 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
281 _black_image->make_black ();
/* NOTE(review): presumably emitted after the lock is released — the
   unlock is in lines elided from this chunk */
284 Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Playlist structure changed: invalidate pieces and notify listeners. */
288 Player::playlist_changed ()
290 _have_valid_pieces = false;
291 Changed (PlayerProperty::PLAYLIST, false);
/* React to a change in a Film property that affects our output. */
295 Player::film_changed (Film::Property p)
297 /* Here we should notice Film properties that affect our output, and
298 alert listeners that our output now would be different to how it was
299 last time we were run.
302 if (p == Film::CONTAINER) {
303 Changed (PlayerProperty::FILM_CONTAINER, false);
304 } else if (p == Film::VIDEO_FRAME_RATE) {
305 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
306 so we need new pieces here.
308 _have_valid_pieces = false;
309 Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
310 } else if (p == Film::AUDIO_PROCESSOR) {
311 if (_film->audio_processor ()) {
/* Take our own clone so we can flush/reset it independently */
312 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
314 } else if (p == Film::AUDIO_CHANNELS) {
315 _audio_merger.clear ();
/* Convert a list of bitmap subtitles into PositionImages scaled and
   positioned for _video_container_size.
   NOTE(review): the scaling/conversion call between lines 330 and 342 is
   partially elided in this chunk. */
320 Player::transform_bitmap_texts (list<BitmapText> subs) const
322 list<PositionImage> all;
324 for (list<BitmapText>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
329 /* We will scale the subtitle up to fit _video_container_size */
/* rectangle is expressed as fractions of the container, so multiply out */
330 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
336 dcp::YUV_TO_RGB_REC601,
337 i->image->pixel_format (),
/* Position is likewise a fraction of the container; round to pixels */
342 lrint (_video_container_size.width * i->rectangle.x),
343 lrint (_video_container_size.height * i->rectangle.y)
/* @return a PlayerVideo wrapping the cached black image, sized to the
   video container, for the given @param eyes (used to fill gaps in the
   timeline). */
352 shared_ptr<PlayerVideo>
353 Player::black_player_video_frame (Eyes eyes) const
355 return shared_ptr<PlayerVideo> (
357 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
360 _video_container_size,
361 _video_container_size,
364 PresetColourConversion::all().front().conversion,
/* No originating content or frame number for synthetic black */
365 boost::weak_ptr<Content>(),
366 boost::optional<Frame>()
/* Convert a DCP time @param t to a video frame index within
   @param piece's content, accounting for position, trim and the
   skip/repeat factor of its FrameRateChange. */
372 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
374 DCPTime s = t - piece->content->position ();
/* Clamp into the content's trimmed range */
375 s = min (piece->content->length_after_trim(), s);
376 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
378 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
379 then convert that ContentTime to frames at the content's rate. However this fails for
380 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
381 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
383 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
385 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: convert content video frame @param f
   of @param piece to a DCP time. */
389 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
391 /* See comment in dcp_to_content_video */
392 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
393 return d + piece->content->position();
/* Convert DCP time @param t to an audio frame index within @param piece,
   at the film's (resampled) audio frame rate. */
397 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
399 DCPTime s = t - piece->content->position ();
400 s = min (piece->content->length_after_trim(), s);
401 /* See notes in dcp_to_content_video */
402 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: convert resampled audio frame
   @param f of @param piece to a DCP time. */
406 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
408 /* See comment in dcp_to_content_video */
409 return DCPTime::from_frames (f, _film->audio_frame_rate())
410 - DCPTime (piece->content->trim_start(), piece->frc)
411 + piece->content->position();
/* Convert DCP time @param t to a ContentTime within @param piece,
   clamped to the content's trimmed range. */
415 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
417 DCPTime s = t - piece->content->position ();
418 s = min (piece->content->length_after_trim(), s);
419 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Convert ContentTime @param t within @param piece to a DCP time,
   clamped so it never falls before time zero. */
423 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
425 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by every text content in the playlist.
   @return concatenated (possibly duplicated) font list. */
428 list<shared_ptr<Font> >
429 Player::get_subtitle_fonts ()
431 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
/* Ensure pieces are built before walking them (rebuild call elided here) */
433 if (!_have_valid_pieces) {
437 list<shared_ptr<Font> > fonts;
438 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
439 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
440 /* XXX: things may go wrong if there are duplicate font IDs
441 with different font files.
443 list<shared_ptr<Font> > f = j->fonts ();
444 copy (f.begin(), f.end(), back_inserter (fonts));
/** Set this player never to produce any video data */
453 Player::set_ignore_video ()
455 boost::mutex::scoped_lock lm (_mutex);
456 _ignore_video = true;
/* Pieces must be rebuilt so the flag reaches the decoders */
457 _have_valid_pieces = false;
/** Set this player never to produce any audio data.
    NOTE(review): unlike set_ignore_video there is no visible _mutex lock
    here — confirm whether one exists in the lines elided from this chunk. */
461 Player::set_ignore_audio ()
463 _ignore_audio = true;
464 _have_valid_pieces = false;
/** Set this player never to produce any text data
    (body partially elided in this chunk). */
468 Player::set_ignore_text ()
470 boost::mutex::scoped_lock lm (_mutex);
/** Set the player to always burn open texts into the image regardless of the content settings */
476 Player::set_always_burn_open_subtitles ()
478 boost::mutex::scoped_lock lm (_mutex);
479 _always_burn_open_subtitles = true;
/** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function signature and the line setting the fast flag
   are elided from this chunk; only the lock and invalidation are visible. */
486 boost::mutex::scoped_lock lm (_mutex);
488 _have_valid_pieces = false;
/** Set the player to play even DCP content that is marked as referenced
    (rather than passing it through untouched). */
492 Player::set_play_referenced ()
494 boost::mutex::scoped_lock lm (_mutex);
495 _play_referenced = true;
496 _have_valid_pieces = false;
/* Collect the reel assets (picture / sound / subtitle / closed caption)
   of any DCP content that is marked to be referenced rather than
   re-encoded, adjusting each asset's entry point and duration for the
   content's trims and computing its period on the film timeline. */
499 list<ReferencedReelAsset>
500 Player::get_reel_assets ()
502 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
504 list<ReferencedReelAsset> a;
506 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can be referenced */
507 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
512 scoped_ptr<DCPDecoder> decoder;
514 decoder.reset (new DCPDecoder (j, _film->log(), false));
520 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
522 DCPOMATIC_ASSERT (j->video_frame_rate ());
/* cfr = content frame rate, ffr = film frame rate */
523 double const cfr = j->video_frame_rate().get();
524 Frame const trim_start = j->trim_start().frames_round (cfr);
525 Frame const trim_end = j->trim_end().frames_round (cfr);
526 int const ffr = _film->video_frame_rate ();
/* Where this reel starts on the film timeline (offset accumulates reel lengths) */
528 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
529 if (j->reference_video ()) {
530 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
531 DCPOMATIC_ASSERT (ra);
/* Apply the content trims to the asset's playable range */
532 ra->set_entry_point (ra->entry_point() + trim_start);
533 ra->set_duration (ra->duration() - trim_start - trim_end);
535 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
539 if (j->reference_audio ()) {
540 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
541 DCPOMATIC_ASSERT (ra);
542 ra->set_entry_point (ra->entry_point() + trim_start);
543 ra->set_duration (ra->duration() - trim_start - trim_end);
545 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
549 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
550 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
551 DCPOMATIC_ASSERT (ra);
552 ra->set_entry_point (ra->entry_point() + trim_start);
553 ra->set_duration (ra->duration() - trim_start - trim_end);
555 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
559 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
560 shared_ptr<dcp::ReelAsset> ra = k->closed_caption ();
561 DCPOMATIC_ASSERT (ra);
562 ra->set_entry_point (ra->entry_point() + trim_start);
563 ra->set_duration (ra->duration() - trim_start - trim_end);
565 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
569 /* Assume that main picture duration is the length of the reel */
570 offset += k->main_picture()->duration ();
/* NOTE(review): the signature line of this function is not visible in this
   chunk; from its body this appears to be the per-iteration "pass" routine
   that advances whichever decoder (or black/silent filler) is furthest
   behind, then flushes ready audio and delayed video. */
580 boost::mutex::scoped_lock lm (_mutex);
582 if (!_have_valid_pieces) {
586 if (_playlist->length() == DCPTime()) {
587 /* Special case of an empty Film; just give one black frame */
588 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
592 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
594 shared_ptr<Piece> earliest_content;
595 optional<DCPTime> earliest_time;
597 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Current position of this piece on the DCP timeline */
602 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
603 if (t > i->content->end()) {
607 /* Given two choices at the same time, pick the one with texts so we see it before
610 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
612 earliest_content = i;
626 if (earliest_content) {
/* The black/silent fillers may be behind all the content */
630 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
631 earliest_time = _black.position ();
635 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
636 earliest_time = _silent.position ();
/* Advance whichever source was chosen as earliest */
642 earliest_content->done = earliest_content->decoder->pass ();
645 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
646 _black.set_position (_black.position() + one_video_frame());
650 DCPTimePeriod period (_silent.period_at_position());
651 if (_last_audio_time) {
652 /* Sometimes the thing that happened last finishes fractionally before
653 this silence. Bodge the start time of the silence to fix it. I'm
654 not sure if this is the right solution --- maybe the last thing should
655 be padded `forward' rather than this thing padding `back'.
657 period.from = min(period.from, *_last_audio_time);
/* Emit silence at most one video frame at a time */
659 if (period.duration() > one_video_frame()) {
660 period.to = period.from + one_video_frame();
663 _silent.set_position (period.to);
671 /* Emit any audio that is ready */
673 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
674 of our streams, or the position of the _silent.
676 DCPTime pull_to = _film->length ();
677 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
678 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
679 pull_to = i->second.last_push_end;
682 if (!_silent.done() && _silent.position() < pull_to) {
683 pull_to = _silent.position();
686 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
687 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
688 if (_last_audio_time && i->second < *_last_audio_time) {
689 /* This new data comes before the last we emitted (or the last seek); discard it */
690 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
695 } else if (_last_audio_time && i->second > *_last_audio_time) {
696 /* There's a gap between this data and the last we emitted; fill with silence */
697 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
700 emit_audio (i->first, i->second);
/* Flush any video frames held back in the delay queue */
705 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
706 do_emit_video(i->first, i->second);
/* @return active closed captions for the one-video-frame period starting
   at @param time. */
714 Player::closed_captions_for_frame (DCPTime time) const
716 boost::mutex::scoped_lock _lm (_mutex);
717 return _active_texts[TEXT_CLOSED_CAPTION].get (
718 DCPTimePeriod(time, time + DCPTime::from_frames(1, _film->video_frame_rate()))
/** @return Open subtitles for the frame at the given time, converted to images */
723 optional<PositionImage>
724 Player::open_subtitles_for_frame (DCPTime time) const
726 list<PositionImage> captions;
727 int const vfr = _film->video_frame_rate();
/* Fetch subtitles that should be burnt in during this one-frame period */
731 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
734 /* Image subtitles */
735 list<PositionImage> c = transform_bitmap_texts (j.image);
736 copy (c.begin(), c.end(), back_inserter (captions));
738 /* Text subtitles (rendered to an image) */
739 if (!j.text.empty ()) {
740 list<PositionImage> s = render_text (j.text, j.fonts, _video_container_size, time, vfr);
741 copy (s.begin(), s.end(), back_inserter (captions));
745 if (captions.empty ()) {
746 return optional<PositionImage> ();
/* Combine all caption images into one */
749 return merge (captions);
/* Handler for video data emitted by a piece's decoder.  Converts the
   content frame to DCP time, discards out-of-range/seek-stale frames,
   fills any gap since the last emitted frame (repeating the previous
   frame or emitting black, with 3D eye handling), then emits the new
   frame, repeated as required by the frame-rate change. */
753 Player::video (weak_ptr<Piece> wp, ContentVideo video)
755 shared_ptr<Piece> piece = wp.lock ();
760 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
/* When skipping (content faster than DCP) drop every other frame */
761 if (frc.skip && (video.frame % 2) == 1) {
765 /* Time of the first frame we will emit */
766 DCPTime const time = content_video_to_dcp (piece, video.frame);
768 /* Discard if it's before the content's period or the last accurate seek. We can't discard
769 if it's after the content's period here as in that case we still need to fill any gap between
770 `now' and the end of the content's period.
772 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
776 /* Fill gaps that we discover now that we have some video which needs to be emitted.
777 This is where we need to fill to.
779 DCPTime fill_to = min (time, piece->content->end());
781 if (_last_video_time) {
782 DCPTime fill_from = max (*_last_video_time, piece->content->position());
/* Previous frame emitted for this piece, if any, to repeat into the gap */
783 LastVideoMap::const_iterator last = _last_video.find (wp);
784 if (_film->three_d()) {
785 DCPTime j = fill_from;
786 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
787 if (eyes == EYES_BOTH) {
/* Fill frame-by-frame, alternating eyes, until we reach the new frame */
790 while (j < fill_to || eyes != video.eyes) {
791 if (last != _last_video.end()) {
792 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
793 copy->set_eyes (eyes);
794 emit_video (copy, j);
796 emit_video (black_player_video_frame(eyes), j);
/* Time only advances after the right eye of a stereo pair */
798 if (eyes == EYES_RIGHT) {
799 j += one_video_frame();
801 eyes = increment_eyes (eyes);
804 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
805 if (last != _last_video.end()) {
806 emit_video (last->second, j);
808 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for the newly-decoded frame and remember it */
814 _last_video[wp].reset (
817 piece->content->video->crop (),
818 piece->content->video->fade (video.frame),
819 piece->content->video->scale().size (
820 piece->content->video, _video_container_size, _film->frame_size ()
822 _video_container_size,
825 piece->content->video->colour_conversion(),
/* Repeat the frame as required by the frame-rate change, stopping at the
   end of the content */
832 for (int i = 0; i < frc.repeat; ++i) {
833 if (t < piece->content->end()) {
834 emit_video (_last_video[wp], t);
836 t += one_video_frame ();
/* Handler for audio data emitted by a piece's decoder.  Maps the block
   onto the DCP timeline, trims anything outside the content's period,
   applies gain, channel remapping and the optional audio processor, then
   pushes the result into the merger and records the stream's push-end. */
841 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
843 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
845 shared_ptr<Piece> piece = wp.lock ();
850 shared_ptr<AudioContent> content = piece->content->audio;
851 DCPOMATIC_ASSERT (content);
853 /* Compute time in the DCP */
854 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
855 /* And the end of this block in the DCP */
856 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
858 /* Remove anything that comes before the start or after the end of the content */
859 if (time < piece->content->position()) {
860 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
862 /* This audio is entirely discarded */
865 content_audio.audio = cut.first;
867 } else if (time > piece->content->end()) {
/* Entirely after the content: drop it (return elided in this chunk) */
870 } else if (end > piece->content->end()) {
/* Straddles the end of the content: keep only the in-range frames */
871 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
872 if (remaining_frames == 0) {
875 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
876 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
877 content_audio.audio = cut;
880 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting (dB) if non-zero */
884 if (content->gain() != 0) {
885 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
886 gain->apply_gain (content->gain ());
887 content_audio.audio = gain;
/* Remap the stream's channels onto the film's channel layout */
892 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
896 if (_audio_processor) {
897 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
902 _audio_merger.push (content_audio.audio, time);
903 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
904 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for the start of a bitmap subtitle: apply the content's offset
   and scale settings to the subtitle rectangle and register it as active
   from its DCP start time. */
908 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
910 shared_ptr<Piece> piece = wp.lock ();
911 shared_ptr<const TextContent> text = wc.lock ();
912 if (!piece || !text) {
916 /* Apply content's subtitle offsets */
917 subtitle.sub.rectangle.x += text->x_offset ();
918 subtitle.sub.rectangle.y += text->y_offset ();
920 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
921 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
922 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
924 /* Apply content's subtitle scale */
925 subtitle.sub.rectangle.width *= text->x_scale ();
926 subtitle.sub.rectangle.height *= text->y_scale ();
929 ps.image.push_back (subtitle.sub);
930 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
932 _active_texts[subtitle.type()].add_from (wc, ps, from);
/* Handler for the start of a plain-text subtitle: apply the content's
   position offsets and x/y scaling (expressed via size and aspect
   adjustment), then register it as active from its DCP start time. */
936 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
938 shared_ptr<Piece> piece = wp.lock ();
939 shared_ptr<const TextContent> text = wc.lock ();
940 if (!piece || !text) {
945 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore subtitles that begin after the content has ended */
947 if (from > piece->content->end()) {
951 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
952 s.set_h_position (s.h_position() + text->x_offset ());
953 s.set_v_position (s.v_position() + text->y_offset ());
954 float const xs = text->x_scale();
955 float const ys = text->y_scale();
956 float size = s.size();
958 /* Adjust size to express the common part of the scaling;
959 e.g. if xs = ys = 0.5 we scale size by 2.
961 if (xs > 1e-5 && ys > 1e-5) {
962 size *= 1 / min (1 / xs, 1 / ys);
966 /* Then express aspect ratio changes */
967 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
968 s.set_aspect_adjust (xs / ys);
/* in-time in dcp::Time units (editable at 1000 per second) */
971 s.set_in (dcp::Time(from.seconds(), 1000));
972 ps.text.push_back (StringText (s, text->outline_width()));
973 ps.add_fonts (text->fonts ());
976 _active_texts[subtitle.type()].add_from (wc, ps, from);
/* Handler for the end of a subtitle: close the active text and, if it is
   not being burnt into the image, emit it via the Text signal. */
980 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to, TextType type)
982 if (!_active_texts[type].have (wc)) {
986 shared_ptr<Piece> piece = wp.lock ();
987 shared_ptr<const TextContent> text = wc.lock ();
988 if (!piece || !text) {
992 DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ignore stops that fall after the end of the content */
994 if (dcp_to > piece->content->end()) {
998 pair<PlayerText, DCPTime> from = _active_texts[type].add_to (wc, dcp_to);
/* Burnt-in subtitles are composited onto video instead of being emitted */
1000 bool const always = (type == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1001 if (text->use() && !always && !text->burn()) {
1002 Text (from.first, type, DCPTimePeriod (from.second, dcp_to));
/* Seek to @param time.  @param accurate true to get frame-exact results
   (position state is primed so pre-seek data can be discarded); false
   leaves the position state unset so the first decoded data is accepted. */
1007 Player::seek (DCPTime time, bool accurate)
1009 boost::mutex::scoped_lock lm (_mutex);
1011 if (!_have_valid_pieces) {
/* Drop any part-assembled 3D frames */
1016 _shuffler->clear ();
1021 if (_audio_processor) {
1022 _audio_processor->flush ();
/* Discard pending audio and active texts from before the seek */
1025 _audio_merger.clear ();
1026 for (int i = 0; i < TEXT_COUNT; ++i) {
1027 _active_texts[i].clear ();
1030 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1031 if (time < i->content->position()) {
1032 /* Before; seek to the start of the content */
1033 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
1035 } else if (i->content->position() <= time && time < i->content->end()) {
1036 /* During; seek to position */
1037 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1040 /* After; this piece is done */
/* For accurate seeks, prime last-emitted state so stale data is discarded */
1046 _last_video_time = time;
1047 _last_video_eyes = EYES_LEFT;
1048 _last_audio_time = time;
1050 _last_video_time = optional<DCPTime>();
1051 _last_video_eyes = optional<Eyes>();
1052 _last_audio_time = optional<DCPTime>();
1055 _black.set_position (time);
1056 _silent.set_position (time);
1058 _last_video.clear ();
/* Queue a video frame for emission, updating the last-video time/eyes
   state.  Frames are held in a small delay queue so subtitles have time
   to arrive before the video that needs them. */
1062 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1064 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1065 player before the video that requires them.
1067 _delay.push_back (make_pair (pv, time));
/* Time advances after BOTH/RIGHT eyes; eyes cycle per emitted frame */
1069 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1070 _last_video_time = time + one_video_frame();
1072 _last_video_eyes = increment_eyes (pv->eyes());
/* Hold back until the queue is long enough, then emit the oldest frame */
1074 if (_delay.size() < 3) {
1078 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1080 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: expire finished texts and attach any open
   subtitles that should appear on this frame. */
1084 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1086 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1087 for (int i = 0; i < TEXT_COUNT; ++i) {
1088 _active_texts[i].clear_before (time);
1092 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1094 pv->set_text (subtitles.get ());
/* Emit an audio block.  Audio must be contiguous: @param time has to equal
   the end of the previously emitted block (asserted below). */
1101 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1103 /* Log if the assert below is about to fail */
1104 if (_last_audio_time && time != *_last_audio_time) {
1105 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1108 /* This audio must follow on from the previous */
1109 DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1111 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence covering @param period, in blocks of at most 0.5s. */
1115 Player::fill_audio (DCPTimePeriod period)
1117 if (period.from == period.to) {
1121 DCPOMATIC_ASSERT (period.from < period.to);
1123 DCPTime t = period.from;
1124 while (t < period.to) {
1125 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1126 Frame const samples = block.frames_round(_film->audio_frame_rate());
1128 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1129 silence->make_silent ();
1130 emit_audio (silence, t);
/* @return the duration of one video frame at the film's frame rate. */
1137 Player::one_video_frame () const
1139 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Discard the part of @param audio (starting at @param time) that falls
   before @param discard_to.
   @return the remaining audio and its new start time; a null buffer if
   everything was discarded. */
1142 pair<shared_ptr<AudioBuffers>, DCPTime>
1143 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1145 DCPTime const discard_time = discard_to - time;
1146 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1147 Frame remaining_frames = audio->frames() - discard_frames;
1148 if (remaining_frames <= 0) {
1149 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1151 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1152 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1153 return make_pair(cut, time + discard_time);
/* Set the JPEG2000 decode resolution reduction for DCP content; pieces
   are rebuilt so decoders pick up the new value.  No-op if unchanged. */
1157 Player::set_dcp_decode_reduction (optional<int> reduction)
1160 boost::mutex::scoped_lock lm (_mutex);
1162 if (reduction == _dcp_decode_reduction) {
1166 _dcp_decode_reduction = reduction;
1167 _have_valid_pieces = false;
/* NOTE(review): presumably emitted outside the lock — the unlock is in
   lines elided from this chunk */
1170 Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Public overload: convert ContentTime @param t within @param content to
   DCP time by finding the matching piece.  Asserts if the content is not
   in the playlist. */
1174 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1176 boost::mutex::scoped_lock lm (_mutex);
/* Pieces must be valid to look the content up (rebuild path elided here) */
1178 if (_have_valid_pieces) {
1182 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1183 if (i->content == content) {
1184 return content_time_to_dcp (i, t);
/* Said content not in the player's playlist */
1188 DCPOMATIC_ASSERT (false);