2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
// Identifiers passed through the Changed signal so listeners can tell
// which aspect of the Player's output has been invalidated.
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
// Construct a Player for @param film using @param playlist.  Wires up the
// film/playlist change signals, sizes the video container from the film and
// performs an initial accurate seek to time zero.
// NOTE(review): this span is elided in the extract (part of the initialiser
// list and some statements are missing) -- confirm against the full source.
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 	, _playlist (playlist)
90 	, _have_valid_pieces (false)
91 	, _ignore_video (false)
92 	, _ignore_audio (false)
93 	, _ignore_text (false)
94 	, _always_burn_open_subtitles (false)
96 	, _play_referenced (false)
97 	, _audio_merger (_film->audio_frame_rate())
// Re-examine pieces etc. whenever the film or playlist changes under us.
100 	_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
101 	_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
102 	_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
103 	set_video_container_size (_film->frame_size ());
// Pick up any audio processor the film is already configured with.
105 	film_changed (Film::AUDIO_PROCESSOR);
107 	seek (DCPTime (), true);
// Rebuild the internal list of Pieces (content + decoder + frame-rate
// change) from the playlist, reconnect decoder signals, reset the
// black/silent fillers and mark the piece list valid.
// NOTE(review): several lines (closing braces, 'continue's, error handling)
// are elided in this extract -- confirm against the full source.
116 Player::setup_pieces ()
// A fresh Shuffler is needed to reorder any 3D L/R frames.
121 	_shuffler = new Shuffler();
122 	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));
124 	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
126 		if (!i->paths_valid ()) {
130 		if (_ignore_video && _ignore_audio && i->text.empty()) {
131 			/* We're only interested in text and this content has none */
135 		shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
136 		FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
139 			/* Not something that we can decode; e.g. Atmos content */
// Honour the ignore flags set via set_ignore_video()/set_ignore_audio().
143 		if (decoder->video && _ignore_video) {
144 			decoder->video->set_ignore (true);
147 		if (decoder->audio && _ignore_audio) {
148 			decoder->audio->set_ignore (true);
152 			BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
153 				i->set_ignore (true);
// DCP content may play its referenced (VF) assets directly.
157 		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
159 			dcp->set_decode_referenced (_play_referenced);
160 			if (_play_referenced) {
161 				dcp->set_forced_reduction (_dcp_decode_reduction);
165 		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
166 		_pieces.push_back (piece);
168 		if (decoder->video) {
169 			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
170 				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
171 				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
173 				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
177 		if (decoder->audio) {
178 			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
// Connect every text decoder's start/stop signals to our handlers.
181 		list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
183 		while (j != decoder->text.end()) {
184 			(*j)->BitmapStart.connect (
185 				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
187 			(*j)->PlainStart.connect (
188 				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
191 				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1, _2)
// Track, per audio stream, how far audio has been pushed so pass() knows
// when it is safe to pull from the merger.
198 	_stream_states.clear ();
199 	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
200 		if (i->content->audio) {
201 			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
202 				_stream_states[j] = StreamState (i, i->content->position ());
// Gaps in the timeline are filled with black video and silent audio.
207 	_black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
208 	_silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
210 	_last_video_time = DCPTime ();
211 	_last_video_eyes = EYES_BOTH;
212 	_last_audio_time = DCPTime ();
213 	_have_valid_pieces = true;
// Called when a property of some content in the playlist changes.
// Properties that affect decoding invalidate the piece list (first group);
// properties that only affect presentation just re-emit Changed (second
// group).  @param frequent is forwarded so UI listeners can throttle.
// NOTE(review): the 'if (' lines opening each property group are elided in
// this extract -- confirm against the full source.
217 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
219 	shared_ptr<Content> c = w.lock ();
// Group 1: changes that require new pieces/decoders.
225 		property == ContentProperty::POSITION ||
226 		property == ContentProperty::LENGTH ||
227 		property == ContentProperty::TRIM_START ||
228 		property == ContentProperty::TRIM_END ||
229 		property == ContentProperty::PATH ||
230 		property == VideoContentProperty::FRAME_TYPE ||
231 		property == VideoContentProperty::COLOUR_CONVERSION ||
232 		property == AudioContentProperty::STREAMS ||
233 		property == DCPContentProperty::NEEDS_ASSETS ||
234 		property == DCPContentProperty::NEEDS_KDM ||
235 		property == TextContentProperty::COLOUR ||
236 		property == TextContentProperty::EFFECT ||
237 		property == TextContentProperty::EFFECT_COLOUR ||
238 		property == FFmpegContentProperty::SUBTITLE_STREAM ||
239 		property == FFmpegContentProperty::FILTERS
// Invalidate under the lock; the Changed emission happens outside it.
243 			boost::mutex::scoped_lock lm (_mutex);
244 			_have_valid_pieces = false;
247 		Changed (property, frequent);
// Group 2: presentation-only changes; existing pieces remain valid.
250 		property == TextContentProperty::LINE_SPACING ||
251 		property == TextContentProperty::OUTLINE_WIDTH ||
252 		property == TextContentProperty::Y_SCALE ||
253 		property == TextContentProperty::FADE_IN ||
254 		property == TextContentProperty::FADE_OUT ||
255 		property == ContentProperty::VIDEO_FRAME_RATE ||
256 		property == TextContentProperty::USE ||
257 		property == TextContentProperty::X_OFFSET ||
258 		property == TextContentProperty::Y_OFFSET ||
259 		property == TextContentProperty::X_SCALE ||
260 		property == TextContentProperty::FONTS ||
261 		property == TextContentProperty::TYPE ||
262 		property == VideoContentProperty::CROP ||
263 		property == VideoContentProperty::SCALE ||
264 		property == VideoContentProperty::FADE_IN ||
265 		property == VideoContentProperty::FADE_OUT
268 		Changed (property, frequent);
// Set the size of the image into which all video output is placed.
// No-op (and no signal) if the size is unchanged; otherwise a new black
// frame of the right size is prepared and VIDEO_CONTAINER_SIZE is emitted.
273 Player::set_video_container_size (dcp::Size s)
276 		boost::mutex::scoped_lock lm (_mutex);
278 		if (s == _video_container_size) {
282 		_video_container_size = s;
// Pre-build the reusable black frame at the new size.
284 		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
285 		_black_image->make_black ();
288 	Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
// Playlist structure changed: pieces must be rebuilt before the next pass.
292 Player::playlist_changed ()
295 		boost::mutex::scoped_lock lm (_mutex);
296 		_have_valid_pieces = false;
299 	Changed (PlayerProperty::PLAYLIST, false);
303 Player::film_changed (Film::Property p)
305 	/* Here we should notice Film properties that affect our output, and
306 	   alert listeners that our output now would be different to how it was
307 	   last time we were run.
310 	if (p == Film::CONTAINER) {
311 		Changed (PlayerProperty::FILM_CONTAINER, false);
312 	} else if (p == Film::VIDEO_FRAME_RATE) {
313 		/* Pieces contain a FrameRateChange which contains the DCP frame rate,
314 		   so we need new pieces here.
317 			boost::mutex::scoped_lock lm (_mutex);
318 			_have_valid_pieces = false;
320 		Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
321 	} else if (p == Film::AUDIO_PROCESSOR) {
322 		if (_film->audio_processor ()) {
323 			boost::mutex::scoped_lock lm (_mutex);
// Clone the processor at the film's audio rate for our own use.
324 			_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
326 	} else if (p == Film::AUDIO_CHANNELS) {
327 		boost::mutex::scoped_lock lm (_mutex);
328 		_audio_merger.clear ();
// Convert a list of bitmap subtitles into PositionImages scaled and
// positioned for the current video container size.
// NOTE(review): the scale/conversion call and the return are elided in this
// extract -- confirm against the full source.
333 Player::transform_bitmap_texts (list<BitmapText> subs) const
335 	list<PositionImage> all;
337 	for (list<BitmapText>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
342 		/* We will scale the subtitle up to fit _video_container_size */
// rectangle is expressed as fractions of the container, so multiply out.
343 		dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
349 					dcp::YUV_TO_RGB_REC601,
350 					i->image->pixel_format (),
355 					lrint (_video_container_size.width * i->rectangle.x),
356 					lrint (_video_container_size.height * i->rectangle.y)
/** @return A black PlayerVideo frame, sized to the video container, for the
 *  given @param eyes (used to fill gaps in the timeline).
 */
365 shared_ptr<PlayerVideo>
366 Player::black_player_video_frame (Eyes eyes) const
368 	return shared_ptr<PlayerVideo> (
// The pre-made _black_image is wrapped without copying via RawImageProxy.
370 			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
373 			_video_container_size,
374 			_video_container_size,
377 			PresetColourConversion::all().front().conversion,
378 			boost::weak_ptr<Content>(),
379 			boost::optional<Frame>()
// Time-conversion helpers between the DCP timeline and a piece's content,
// accounting for the piece's position, trims and frame-rate change.
385 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
387 	DCPTime s = t - piece->content->position ();
388 	s = min (piece->content->length_after_trim(), s);
389 	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
391 	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
392 	   then convert that ContentTime to frames at the content's rate.  However this fails for
393 	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
394 	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
396 	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
398 	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
// Inverse of dcp_to_content_video: content frame index -> DCP time.
402 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
404 	/* See comment in dcp_to_content_video */
405 	DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
406 	return d + piece->content->position();
// DCP time -> frame count at the film's audio sample rate.
410 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
412 	DCPTime s = t - piece->content->position ();
413 	s = min (piece->content->length_after_trim(), s);
414 	/* See notes in dcp_to_content_video */
415 	return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
// Audio frame count (at film rate) -> DCP time.
419 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
421 	/* See comment in dcp_to_content_video */
422 	return DCPTime::from_frames (f, _film->audio_frame_rate())
423 		- DCPTime (piece->content->trim_start(), piece->frc)
424 		+ piece->content->position();
// DCP time -> ContentTime within the piece (clamped to the trimmed range).
428 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
430 	DCPTime s = t - piece->content->position ();
431 	s = min (piece->content->length_after_trim(), s);
432 	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
// ContentTime within the piece -> DCP time (clamped at zero).
436 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
438 	return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/** Collect the fonts used by every text content in the playlist.
 *  @return Flat list of fonts (may contain duplicates across content).
 */
441 list<shared_ptr<Font> >
442 Player::get_subtitle_fonts ()
444 	/* Does not require a lock on _mutex as it's only called from DCPEncoder */
// Ensure pieces exist before walking them (setup call elided in extract).
446 	if (!_have_valid_pieces) {
450 	list<shared_ptr<Font> > fonts;
451 	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
452 		BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
453 			/* XXX: things may go wrong if there are duplicate font IDs
454 			   with different font files.
456 			list<shared_ptr<Font> > f = j->fonts ();
457 			copy (f.begin(), f.end(), back_inserter (fonts));
464 /** Set this player never to produce any video data */
466 Player::set_ignore_video ()
468 	boost::mutex::scoped_lock lm (_mutex);
469 	_ignore_video = true;
/** Set this player never to produce any audio data */
474 Player::set_ignore_audio ()
476 	boost::mutex::scoped_lock lm (_mutex);
477 	_ignore_audio = true;
/** Set this player never to produce any text data */
482 Player::set_ignore_text ()
484 	boost::mutex::scoped_lock lm (_mutex);
489 /** Set the player to always burn open texts into the image regardless of the content settings */
491 Player::set_always_burn_open_subtitles ()
493 	boost::mutex::scoped_lock lm (_mutex);
494 	_always_burn_open_subtitles = true;
497 /** Sets up the player to be faster, possibly at the expense of quality */
501 	boost::mutex::scoped_lock lm (_mutex);
/** Play referenced (VF) DCP assets directly rather than skipping them */
507 Player::set_play_referenced ()
509 	boost::mutex::scoped_lock lm (_mutex);
510 	_play_referenced = true;
/** Gather the reel assets (picture/sound/subtitle/caption) of any DCP
 *  content that is marked as referenced, trimmed to the content's trims and
 *  positioned on the film timeline.
 *  NOTE(review): loop variable declarations and error handling are elided in
 *  this extract -- confirm against the full source.
 */
514 list<ReferencedReelAsset>
515 Player::get_reel_assets ()
517 	/* Does not require a lock on _mutex as it's only called from DCPEncoder */
519 	list<ReferencedReelAsset> a;
521 	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
522 		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
// A throw-away decoder is used just to enumerate the DCP's reels.
527 		scoped_ptr<DCPDecoder> decoder;
529 			decoder.reset (new DCPDecoder (j, _film->log(), false));
535 		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
537 			DCPOMATIC_ASSERT (j->video_frame_rate ());
538 			double const cfr = j->video_frame_rate().get();
// Trims are expressed in content frames; ffr is the film's rate.
539 			Frame const trim_start = j->trim_start().frames_round (cfr);
540 			Frame const trim_end = j->trim_end().frames_round (cfr);
541 			int const ffr = _film->video_frame_rate ();
543 			DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
544 			if (j->reference_video ()) {
545 				shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
546 				DCPOMATIC_ASSERT (ra);
// Apply the content trims to the asset's entry point / duration.
547 				ra->set_entry_point (ra->entry_point() + trim_start);
548 				ra->set_duration (ra->duration() - trim_start - trim_end);
550 					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
554 			if (j->reference_audio ()) {
555 				shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
556 				DCPOMATIC_ASSERT (ra);
557 				ra->set_entry_point (ra->entry_point() + trim_start);
558 				ra->set_duration (ra->duration() - trim_start - trim_end);
560 					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
564 			if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
565 				shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
566 				DCPOMATIC_ASSERT (ra);
567 				ra->set_entry_point (ra->entry_point() + trim_start);
568 				ra->set_duration (ra->duration() - trim_start - trim_end);
570 					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
574 			if (j->reference_text (TEXT_CLOSED_CAPTION)) {
575 				shared_ptr<dcp::ReelAsset> ra = k->closed_caption ();
576 				DCPOMATIC_ASSERT (ra);
577 				ra->set_entry_point (ra->entry_point() + trim_start);
578 				ra->set_duration (ra->duration() - trim_start - trim_end);
580 					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
584 			/* Assume that main picture duration is the length of the reel */
585 			offset += k->main_picture()->duration ();
// Player::pass: advance playback by making whichever source (content piece,
// black filler or silence filler) is furthest behind emit some data, then
// flush any audio that is guaranteed complete and any delayed video.
// NOTE(review): the function signature and several control-flow lines
// (returns, switch on earliest source) are elided in this extract --
// confirm against the full source.
595 	boost::mutex::scoped_lock lm (_mutex);
597 	if (!_have_valid_pieces) {
598 		/* This should only happen when we are under the control of the butler.  In this case, _have_valid_pieces
599 		   will be false if something in the Player has changed and we are waiting for the butler to notice
600 		   and do a seek back to the place we were at before.  During this time we don't want pass() to do anything,
601 		   as just after setup_pieces the new decoders will be back to time 0 until the seek has gone through.  Just do nothing
602 		   here and assume that the seek will be forthcoming.
607 	if (_playlist->length() == DCPTime()) {
608 		/* Special case of an empty Film; just give one black frame */
609 		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
613 	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */
615 	shared_ptr<Piece> earliest_content;
616 	optional<DCPTime> earliest_time;
618 	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
// Position of this piece on the DCP timeline, clamped to its trim start.
623 		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
624 		if (t > i->content->end()) {
628 		/* Given two choices at the same time, pick the one with texts so we see it before
631 		if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
633 			earliest_content = i;
647 	if (earliest_content) {
// Black/silent fillers compete with content for "earliest" status.
651 	if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
652 		earliest_time = _black.position ();
656 	if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
657 		earliest_time = _silent.position ();
663 		earliest_content->done = earliest_content->decoder->pass ();
666 		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
667 		_black.set_position (_black.position() + one_video_frame());
671 		DCPTimePeriod period (_silent.period_at_position());
672 		if (_last_audio_time) {
673 			/* Sometimes the thing that happened last finishes fractionally before
674 			   this silence.  Bodge the start time of the silence to fix it.  I'm
675 			   not sure if this is the right solution --- maybe the last thing should
676 			   be padded `forward' rather than this thing padding `back'.
678 			period.from = min(period.from, *_last_audio_time);
// Emit silence at most one video frame at a time.
680 		if (period.duration() > one_video_frame()) {
681 			period.to = period.from + one_video_frame();
684 		_silent.set_position (period.to);
692 	/* Emit any audio that is ready */
694 	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
695 	   of our streams, or the position of the _silent.
697 	DCPTime pull_to = _film->length ();
698 	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
699 		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
700 			pull_to = i->second.last_push_end;
703 	if (!_silent.done() && _silent.position() < pull_to) {
704 		pull_to = _silent.position();
707 	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
708 	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
709 		if (_last_audio_time && i->second < *_last_audio_time) {
710 			/* This new data comes before the last we emitted (or the last seek); discard it */
711 			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
716 		} else if (_last_audio_time && i->second > *_last_audio_time) {
717 			/* There's a gap between this data and the last we emitted; fill with silence */
718 			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
721 		emit_audio (i->first, i->second);
// Flush delayed video frames (see emit_video for why frames are delayed).
726 		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
727 			do_emit_video(i->first, i->second);
734 /** @return Open subtitles for the frame at the given time, converted to images */
735 optional<PositionImage>
736 Player::open_subtitles_for_frame (DCPTime time) const
738 	list<PositionImage> captions;
739 	int const vfr = _film->video_frame_rate();
// Fetch texts active during this one-frame period that should be burnt in.
743 		_active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
746 		/* Bitmap subtitles */
747 		list<PositionImage> c = transform_bitmap_texts (j.bitmap);
748 		copy (c.begin(), c.end(), back_inserter (captions));
750 		/* String subtitles (rendered to an image) */
751 		if (!j.string.empty ()) {
752 			list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
753 			copy (s.begin(), s.end(), back_inserter (captions));
757 	if (captions.empty ()) {
758 		return optional<PositionImage> ();
// Merge all caption images into a single positioned image.
761 	return merge (captions);
// Handler for video data arriving from a piece's decoder.  Converts the
// content frame to DCP time, discards early/seek-superseded frames, fills
// any gap since the last emitted frame (repeating the previous frame or
// emitting black, with 3D eye bookkeeping), then emits this frame
// (repeated as required by the frame-rate change).
// NOTE(review): several returns/closing braces and the PlayerVideo
// constructor arguments are elided in this extract -- confirm against the
// full source.
765 Player::video (weak_ptr<Piece> wp, ContentVideo video)
767 	shared_ptr<Piece> piece = wp.lock ();
772 	FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
// When skipping, only every other content frame is used.
773 	if (frc.skip && (video.frame % 2) == 1) {
777 	/* Time of the first frame we will emit */
778 	DCPTime const time = content_video_to_dcp (piece, video.frame);
780 	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
781 	   if it's after the content's period here as in that case we still need to fill any gap between
782 	   `now' and the end of the content's period.
784 	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
788 	/* Fill gaps that we discover now that we have some video which needs to be emitted.
789 	   This is where we need to fill to.
791 	DCPTime fill_to = min (time, piece->content->end());
793 	if (_last_video_time) {
794 		DCPTime fill_from = max (*_last_video_time, piece->content->position());
795 		LastVideoMap::const_iterator last = _last_video.find (wp);
796 		if (_film->three_d()) {
797 			Eyes fill_to_eyes = video.eyes;
798 			if (fill_to == piece->content->end()) {
799 				/* Don't fill after the end of the content */
800 				fill_to_eyes = EYES_LEFT;
802 			DCPTime j = fill_from;
803 			Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
804 			if (eyes == EYES_BOTH) {
// Fill frame-by-frame, alternating eyes, until we reach the target.
807 			while (j < fill_to || eyes != fill_to_eyes) {
808 				if (last != _last_video.end()) {
809 					shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
810 					copy->set_eyes (eyes);
811 					emit_video (copy, j);
813 					emit_video (black_player_video_frame(eyes), j);
// Time only advances after the right eye of a stereo pair.
815 				if (eyes == EYES_RIGHT) {
816 					j += one_video_frame();
818 				eyes = increment_eyes (eyes);
// 2D case: repeat the last frame (or black) to fill the gap.
821 			for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
822 				if (last != _last_video.end()) {
823 					emit_video (last->second, j);
825 					emit_video (black_player_video_frame(EYES_BOTH), j);
// Remember this frame so future gaps for this piece can repeat it.
831 	_last_video[wp].reset (
834 			piece->content->video->crop (),
835 			piece->content->video->fade (video.frame),
836 			piece->content->video->scale().size (
837 				piece->content->video, _video_container_size, _film->frame_size ()
839 			_video_container_size,
842 			piece->content->video->colour_conversion(),
// Emit the frame, repeated as required by the frame-rate change.
849 	for (int i = 0; i < frc.repeat; ++i) {
850 		if (t < piece->content->end()) {
851 			emit_video (_last_video[wp], t);
853 		t += one_video_frame ();
// Handler for audio data arriving from a piece's decoder.  Places the block
// on the DCP timeline, trims anything outside the content's period, applies
// gain/remap/processing and pushes the result into the audio merger.
// NOTE(review): some returns/closing braces and the remap/processor guard
// lines are elided in this extract -- confirm against the full source.
858 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
860 	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
862 	shared_ptr<Piece> piece = wp.lock ();
867 	shared_ptr<AudioContent> content = piece->content->audio;
868 	DCPOMATIC_ASSERT (content);
870 	/* Compute time in the DCP */
871 	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
872 	/* And the end of this block in the DCP */
873 	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
875 	/* Remove anything that comes before the start or after the end of the content */
876 	if (time < piece->content->position()) {
877 		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
879 			/* This audio is entirely discarded */
882 		content_audio.audio = cut.first;
884 	} else if (time > piece->content->end()) {
887 	} else if (end > piece->content->end()) {
// Block straddles the end of the content: keep only the in-range part.
888 		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
889 		if (remaining_frames == 0) {
892 		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
893 		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
894 		content_audio.audio = cut;
897 	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
// Apply the content's gain setting, if any.
901 	if (content->gain() != 0) {
902 		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
903 		gain->apply_gain (content->gain ());
904 		content_audio.audio = gain;
// Remap the stream's channels into the film's channel layout.
909 	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
913 	if (_audio_processor) {
914 		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
// Push into the merger and record how far this stream has been pushed.
919 	_audio_merger.push (content_audio.audio, time);
920 	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
921 	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
// Handler for a bitmap subtitle starting: applies the content's offset and
// scale settings to the subtitle rectangle and records it as active.
925 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
927 	shared_ptr<Piece> piece = wp.lock ();
928 	shared_ptr<const TextContent> text = wc.lock ();
929 	if (!piece || !text) {
933 	/* Apply content's subtitle offsets */
934 	subtitle.sub.rectangle.x += text->x_offset ();
935 	subtitle.sub.rectangle.y += text->y_offset ();
937 	/* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
938 	subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
939 	subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
941 	/* Apply content's subtitle scale */
942 	subtitle.sub.rectangle.width *= text->x_scale ();
943 	subtitle.sub.rectangle.height *= text->y_scale ();
946 	ps.bitmap.push_back (subtitle.sub);
947 	DCPTime from (content_time_to_dcp (piece, subtitle.from()));
948 	_active_texts[subtitle.type()].add_from (wc, ps, from);
// Handler for a plain (string) subtitle starting: applies the content's
// offset/scale/aspect settings to each SubtitleString and records the
// result as active.
953 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
955 	shared_ptr<Piece> piece = wp.lock ();
956 	shared_ptr<const TextContent> text = wc.lock ();
957 	if (!piece || !text) {
962 	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
// Ignore subtitles that start after the content's period.
964 	if (from > piece->content->end()) {
968 	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
969 		s.set_h_position (s.h_position() + text->x_offset ());
970 		s.set_v_position (s.v_position() + text->y_offset ());
971 		float const xs = text->x_scale();
972 		float const ys = text->y_scale();
973 		float size = s.size();
975 		/* Adjust size to express the common part of the scaling;
976 		   e.g. if xs = ys = 0.5 we scale size by 2.
978 		if (xs > 1e-5 && ys > 1e-5) {
979 			size *= 1 / min (1 / xs, 1 / ys);
983 		/* Then express aspect ratio changes */
984 		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
985 			s.set_aspect_adjust (xs / ys);
// dcp::Time is constructed here with millisecond precision (1000 tcr).
988 		s.set_in (dcp::Time(from.seconds(), 1000));
989 		ps.string.push_back (StringText (s, text->outline_width()));
990 		ps.add_fonts (text->fonts ());
993 	_active_texts[subtitle.type()].add_from (wc, ps, from);
// Handler for a subtitle ending: closes the active text period and, if the
// subtitle is not being burnt into the image, emits it via the Text signal.
997 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to, TextType type)
// Nothing to stop if we never saw this text start.
999 	if (!_active_texts[type].have (wc)) {
1003 	shared_ptr<Piece> piece = wp.lock ();
1004 	shared_ptr<const TextContent> text = wc.lock ();
1005 	if (!piece || !text) {
1009 	DCPTime const dcp_to = content_time_to_dcp (piece, to);
1011 	if (dcp_to > piece->content->end()) {
1015 	pair<PlayerText, DCPTime> from = _active_texts[type].add_to (wc, dcp_to);
// Burnt subtitles are composited onto video instead of being emitted here.
1017 	bool const always = (type == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1018 	if (text->use() && !always && !text->burn()) {
1019 		Text (from.first, type, DCPTimePeriod (from.second, dcp_to));
1024 Player::seek (DCPTime time, bool accurate)
1026 boost::mutex::scoped_lock lm (_mutex);
1028 if (!_have_valid_pieces) {
1033 _shuffler->clear ();
1038 if (_audio_processor) {
1039 _audio_processor->flush ();
1042 _audio_merger.clear ();
1043 for (int i = 0; i < TEXT_COUNT; ++i) {
1044 _active_texts[i].clear ();
1047 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1048 if (time < i->content->position()) {
1049 /* Before; seek to the start of the content */
1050 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
1052 } else if (i->content->position() <= time && time < i->content->end()) {
1053 /* During; seek to position */
1054 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1057 /* After; this piece is done */
1063 _last_video_time = time;
1064 _last_video_eyes = EYES_LEFT;
1065 _last_audio_time = time;
1067 _last_video_time = optional<DCPTime>();
1068 _last_video_eyes = optional<Eyes>();
1069 _last_audio_time = optional<DCPTime>();
1072 _black.set_position (time);
1073 _silent.set_position (time);
1075 _last_video.clear ();
// Queue a video frame for emission, keeping a short delay buffer so that
// subtitles which arrive slightly late can still be attached to the frame.
1079 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1081 	/* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1082 	   player before the video that requires them.
1084 	_delay.push_back (make_pair (pv, time));
// Only a completed frame (both eyes, or 2D) advances the clock.
1086 	if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1087 		_last_video_time = time + one_video_frame();
1089 	_last_video_eyes = increment_eyes (pv->eyes());
// Hold up to three frames before anything is actually emitted.
1091 	if (_delay.size() < 3) {
1095 	pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1097 	do_emit_video (to_do.first, to_do.second);
// Actually emit a frame: attach any open subtitles active at its time and
// expire texts that have finished.
1101 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1103 	if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1104 		for (int i = 0; i < TEXT_COUNT; ++i) {
1105 			_active_texts[i].clear_before (time);
1109 	optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1111 		pv->set_text (subtitles.get ());
// Emit an audio block, asserting it follows directly on from the previous.
1118 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1120 	/* Log if the assert below is about to fail */
1121 	if (_last_audio_time && time != *_last_audio_time) {
1122 		_film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1125 	/* This audio must follow on from the previous */
1126 	DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1128 	_last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
// Fill @param period with silence, emitted in blocks of at most 0.5s.
1132 Player::fill_audio (DCPTimePeriod period)
1134 	if (period.from == period.to) {
1138 	DCPOMATIC_ASSERT (period.from < period.to);
1140 	DCPTime t = period.from;
1141 	while (t < period.to) {
1142 		DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1143 		Frame const samples = block.frames_round(_film->audio_frame_rate());
1145 			shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1146 			silence->make_silent ();
1147 			emit_audio (silence, t);
// The duration of a single video frame at the film's frame rate.
1154 Player::one_video_frame () const
1156 	return DCPTime::from_frames (1, _film->video_frame_rate ());
/** Drop the part of @param audio (starting at @param time) that lies before
 *  @param discard_to.
 *  @return The remaining audio and its new start time; a null buffer if
 *  everything was discarded.
 */
1159 pair<shared_ptr<AudioBuffers>, DCPTime>
1160 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1162 	DCPTime const discard_time = discard_to - time;
1163 	Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1164 	Frame remaining_frames = audio->frames() - discard_frames;
1165 	if (remaining_frames <= 0) {
1166 		return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1168 	shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1169 	cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1170 	return make_pair(cut, time + discard_time);
// Set (or clear, with none) the resolution reduction used when decoding
// DCP content; invalidates the pieces so decoders are rebuilt.
1174 Player::set_dcp_decode_reduction (optional<int> reduction)
1177 		boost::mutex::scoped_lock lm (_mutex);
1179 		if (reduction == _dcp_decode_reduction) {
1183 		_dcp_decode_reduction = reduction;
1184 		_have_valid_pieces = false;
1187 	Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);
// Public overload: convert a ContentTime in @param content to DCP time by
// finding the matching piece.  Asserts if the content is not in the player.
1191 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1193 	boost::mutex::scoped_lock lm (_mutex);
// NOTE(review): condition looks inverted relative to the usual
// "rebuild if NOT valid" pattern -- likely elided lines here; confirm.
1195 	if (_have_valid_pieces) {
1199 	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1200 		if (i->content == content) {
1201 			return content_time_to_dcp (i, t);
1205 	DCPOMATIC_ASSERT (false);