/*
    Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
// NOTE(review): extraction damage -- each line keeps a stale source line number
// and some lines are missing (e.g. the "_film (film)" initialiser and braces);
// code kept byte-identical, comments only.
/* Construct a Player for the given Film/Playlist.  Subscribes to the film's
   and playlist's change signals, sizes the video container from the film,
   primes the audio processor state and seeks accurately to time zero. */
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 , _playlist (playlist)
90 , _have_valid_pieces (false)
91 , _ignore_video (false)
92 , _ignore_audio (false)
93 , _ignore_text (false)
94 , _always_burn_open_subtitles (false)
96 , _play_referenced (false)
97 , _audio_merger (_film->audio_frame_rate())
/* React to film / playlist / content changes so our pieces and output stay valid */
100 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
101 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
102 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
103 set_video_container_size (_film->frame_size ());
/* Set up _audio_processor from the film's current setting */
105 film_changed (Film::AUDIO_PROCESSOR);
/* Accurate seek to the start so the first pass() emits from time zero */
107 seek (DCPTime (), true);
// NOTE(review): extraction damage -- stale line-number prefixes and missing
// lines (braces, `continue`s etc.); code kept byte-identical, comments only.
/* Rebuild _pieces from the playlist: one Piece (content + decoder +
   FrameRateChange) per usable item, wiring decoder outputs to
   Player::video / Player::audio and the text start/stop handlers. */
116 Player::setup_pieces ()
/* Shuffler re-orders 3D L/R eye data that can arrive out of sequence */
121 _shuffler = new Shuffler();
122 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
124 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files have gone missing */
126 if (!i->paths_valid ()) {
/* Everything is ignored except text, and this content has no text: skip it */
130 if (_ignore_video && _ignore_audio && i->text.empty()) {
131 /* We're only interested in text and this content has none */
135 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
136 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
139 /* Not something that we can decode; e.g. Atmos content */
/* Honour the set_ignore_video / set_ignore_audio / set_ignore_text flags */
143 if (decoder->video && _ignore_video) {
144 decoder->video->set_ignore (true);
147 if (decoder->audio && _ignore_audio) {
148 decoder->audio->set_ignore (true);
152 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
153 i->set_ignore (true);
/* DCP content may be passed through ("referenced") rather than re-decoded */
157 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
159 dcp->set_decode_referenced (_play_referenced);
160 if (_play_referenced) {
161 dcp->set_forced_reduction (_dcp_decode_reduction);
165 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
166 _pieces.push_back (piece);
168 if (decoder->video) {
169 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
170 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
171 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
173 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
177 if (decoder->audio) {
178 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Wire each text (subtitle / caption) decoder's start and stop signals */
181 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
183 while (j != decoder->text.end()) {
184 (*j)->BitmapStart.connect (
185 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
187 (*j)->PlainStart.connect (
188 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
191 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1, _2)
/* Record, per audio stream, which piece it belongs to and where pushing starts */
198 _stream_states.clear ();
199 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
200 if (i->content->audio) {
201 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
202 _stream_states[j] = StreamState (i, i->content->position ());
/* _black / _silent describe the playlist gaps that must be filled with
   black video / silent audio respectively */
207 _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
208 _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
210 _last_video_time = DCPTime ();
211 _last_video_eyes = EYES_BOTH;
212 _last_audio_time = DCPTime ();
213 _have_valid_pieces = true;
// NOTE(review): extraction damage -- stale line-number prefixes and missing
// lines (`if (` heads, braces); code kept byte-identical, comments only.
/* Called when a property of some playlist content changes.  Properties in the
   first group invalidate our Pieces (decoders must be rebuilt) before
   signalling; those in the second group only require the Changed signal. */
217 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
219 shared_ptr<Content> c = w.lock ();
/* These properties affect how content is decoded, so the pieces are stale */
225 property == ContentProperty::POSITION ||
226 property == ContentProperty::LENGTH ||
227 property == ContentProperty::TRIM_START ||
228 property == ContentProperty::TRIM_END ||
229 property == ContentProperty::PATH ||
230 property == VideoContentProperty::FRAME_TYPE ||
231 property == VideoContentProperty::COLOUR_CONVERSION ||
232 property == AudioContentProperty::STREAMS ||
233 property == DCPContentProperty::NEEDS_ASSETS ||
234 property == DCPContentProperty::NEEDS_KDM ||
235 property == TextContentProperty::COLOUR ||
236 property == TextContentProperty::EFFECT ||
237 property == TextContentProperty::EFFECT_COLOUR ||
238 property == FFmpegContentProperty::SUBTITLE_STREAM ||
239 property == FFmpegContentProperty::FILTERS
/* Invalidate pieces under the lock, then signal without holding it */
243 boost::mutex::scoped_lock lm (_mutex);
244 _have_valid_pieces = false;
247 Changed (property, frequent);
/* These only change the rendering of existing decoded data; no rebuild needed */
250 property == TextContentProperty::LINE_SPACING ||
251 property == TextContentProperty::OUTLINE_WIDTH ||
252 property == TextContentProperty::Y_SCALE ||
253 property == TextContentProperty::FADE_IN ||
254 property == TextContentProperty::FADE_OUT ||
255 property == ContentProperty::VIDEO_FRAME_RATE ||
256 property == TextContentProperty::USE ||
257 property == TextContentProperty::X_OFFSET ||
258 property == TextContentProperty::Y_OFFSET ||
259 property == TextContentProperty::X_SCALE ||
260 property == TextContentProperty::FONTS ||
261 property == TextContentProperty::TYPE ||
262 property == VideoContentProperty::CROP ||
263 property == VideoContentProperty::SCALE ||
264 property == VideoContentProperty::FADE_IN ||
265 property == VideoContentProperty::FADE_OUT
268 Changed (property, frequent);
// NOTE(review): extraction damage -- stale line-number prefixes and missing
// lines (braces, early `return`s); code kept byte-identical, comments only.
/* Change the size of the "container" the output video is composed into and
   rebuild the cached black frame to match; no-op if the size is unchanged. */
273 Player::set_video_container_size (dcp::Size s)
276 boost::mutex::scoped_lock lm (_mutex);
278 if (s == _video_container_size) {
282 _video_container_size = s;
/* Pre-render a black frame at the new size for gap filling */
284 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
285 _black_image->make_black ();
288 Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Playlist structure changed: invalidate pieces and tell listeners */
292 Player::playlist_changed ()
295 boost::mutex::scoped_lock lm (_mutex);
296 _have_valid_pieces = false;
299 Changed (PlayerProperty::PLAYLIST, false);
303 Player::film_changed (Film::Property p)
305 /* Here we should notice Film properties that affect our output, and
306 alert listeners that our output now would be different to how it was
307 last time we were run.
310 if (p == Film::CONTAINER) {
311 Changed (PlayerProperty::FILM_CONTAINER, false);
312 } else if (p == Film::VIDEO_FRAME_RATE) {
313 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
314 so we need new pieces here.
317 boost::mutex::scoped_lock lm (_mutex);
318 _have_valid_pieces = false;
320 Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
321 } else if (p == Film::AUDIO_PROCESSOR) {
/* Clone the film's processor at our sample rate (or presumably reset when
   the film has none -- the else branch is missing from this fragment) */
322 if (_film->audio_processor ()) {
323 boost::mutex::scoped_lock lm (_mutex);
324 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
326 } else if (p == Film::AUDIO_CHANNELS) {
/* Channel count changed: pending mixed audio is no longer valid */
327 boost::mutex::scoped_lock lm (_mutex);
328 _audio_merger.clear ();
// NOTE(review): extraction damage -- stale line-number prefixes and missing
// lines (the image-scaling call is truncated); code kept byte-identical.
/* Convert a list of bitmap subtitles (rectangles expressed as proportions of
   the frame) into PositionImages scaled and positioned for
   _video_container_size. */
333 Player::transform_bitmap_texts (list<BitmapText> subs) const
335 list<PositionImage> all;
337 for (list<BitmapText>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
342 /* We will scale the subtitle up to fit _video_container_size */
343 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
/* Arguments to the (truncated) scale/convert call: REC601 matrix, source pixel format */
349 dcp::YUV_TO_RGB_REC601,
350 i->image->pixel_format (),
/* Position is the rectangle's proportional origin scaled to the container */
355 lrint (_video_container_size.width * i->rectangle.x),
356 lrint (_video_container_size.height * i->rectangle.y)
// NOTE(review): extraction damage -- stale line-number prefixes and missing
// lines (some PlayerVideo constructor arguments); code kept byte-identical.
/* Build a PlayerVideo wrapping the cached _black_image, sized to the
   container, for the given eye(s); used to fill gaps in the playlist. */
365 shared_ptr<PlayerVideo>
366 Player::black_player_video_frame (Eyes eyes) const
368 return shared_ptr<PlayerVideo> (
370 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
373 _video_container_size,
374 _video_container_size,
377 PresetColourConversion::all().front().conversion,
/* Black frames have no originating content or frame index */
378 boost::weak_ptr<Content>(),
379 boost::optional<Frame>()
385 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
387 DCPTime s = t - piece->content->position ();
388 s = min (piece->content->length_after_trim(), s);
389 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
391 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
392 then convert that ContentTime to frames at the content's rate. However this fails for
393 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
394 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
396 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
398 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
402 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
404 /* See comment in dcp_to_content_video */
405 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
406 return d + piece->content->position();
410 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
412 DCPTime s = t - piece->content->position ();
413 s = min (piece->content->length_after_trim(), s);
414 /* See notes in dcp_to_content_video */
415 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
419 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
421 /* See comment in dcp_to_content_video */
422 return DCPTime::from_frames (f, _film->audio_frame_rate())
423 - DCPTime (piece->content->trim_start(), piece->frc)
424 + piece->content->position();
428 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
430 DCPTime s = t - piece->content->position ();
431 s = min (piece->content->length_after_trim(), s);
432 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
436 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
438 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
// NOTE(review): extraction damage -- stale line-number prefixes and missing
// lines (braces and, presumably, a setup_pieces() call inside the `if`);
// code kept byte-identical, comments only.
/* Collect every font used by any text content in the playlist, for
   embedding in the output DCP's subtitles. */
441 list<shared_ptr<Font> >
442 Player::get_subtitle_fonts ()
444 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
446 if (!_have_valid_pieces) {
450 list<shared_ptr<Font> > fonts;
451 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
452 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
453 /* XXX: things may go wrong if there are duplicate font IDs
454 with different font files.
456 list<shared_ptr<Font> > f = j->fonts ();
457 copy (f.begin(), f.end(), back_inserter (fonts));
// NOTE(review): extraction damage -- stale line-number prefixes; note that
// the actual assignments in set_ignore_text (`_ignore_text = true;`?) and
// set_fast (`_fast = true;`?) are among the lines missing from this
// fragment.  Code kept byte-identical, comments only.
464 /** Set this player never to produce any video data */
466 Player::set_ignore_video ()
468 boost::mutex::scoped_lock lm (_mutex);
469 _ignore_video = true;
/* Pieces must be rebuilt so decoders pick up the new ignore flag */
470 _have_valid_pieces = false;
/* As set_ignore_video, but for audio */
474 Player::set_ignore_audio ()
476 boost::mutex::scoped_lock lm (_mutex);
477 _ignore_audio = true;
478 _have_valid_pieces = false;
/* As set_ignore_video, but for subtitles/captions */
482 Player::set_ignore_text ()
484 boost::mutex::scoped_lock lm (_mutex);
488 /** Set the player to always burn open texts into the image regardless of the content settings */
490 Player::set_always_burn_open_subtitles ()
492 boost::mutex::scoped_lock lm (_mutex);
493 _always_burn_open_subtitles = true;
496 /** Sets up the player to be faster, possibly at the expense of quality */
500 boost::mutex::scoped_lock lm (_mutex);
502 _have_valid_pieces = false;
/* Pass through (rather than re-encode) assets of DCP content that are
   marked for reference into the output */
506 Player::set_play_referenced ()
508 boost::mutex::scoped_lock lm (_mutex);
509 _play_referenced = true;
510 _have_valid_pieces = false;
// NOTE(review): extraction damage -- stale line-number prefixes and missing
// lines (braces, `continue`s, the a.push_back() heads and the `offset`
// declaration); code kept byte-identical, comments only.
/* Collect the reel assets (picture/sound/subtitle/caption) of any DCP
   content that is marked to be referenced rather than re-encoded, each
   trimmed and tagged with the DCP time period it occupies. */
513 list<ReferencedReelAsset>
514 Player::get_reel_assets ()
516 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
518 list<ReferencedReelAsset> a;
520 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can be referenced */
521 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
526 scoped_ptr<DCPDecoder> decoder;
528 decoder.reset (new DCPDecoder (j, _film->log(), false));
534 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
/* Trims are expressed in the content's own frame rate (cfr), periods in the film's (ffr) */
536 DCPOMATIC_ASSERT (j->video_frame_rate ());
537 double const cfr = j->video_frame_rate().get();
538 Frame const trim_start = j->trim_start().frames_round (cfr);
539 Frame const trim_end = j->trim_end().frames_round (cfr);
540 int const ffr = _film->video_frame_rate ();
542 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
543 if (j->reference_video ()) {
544 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
545 DCPOMATIC_ASSERT (ra);
/* Apply the content trims to the asset's entry point / duration */
546 ra->set_entry_point (ra->entry_point() + trim_start);
547 ra->set_duration (ra->duration() - trim_start - trim_end);
549 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
553 if (j->reference_audio ()) {
554 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
555 DCPOMATIC_ASSERT (ra);
556 ra->set_entry_point (ra->entry_point() + trim_start);
557 ra->set_duration (ra->duration() - trim_start - trim_end);
559 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
563 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
564 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
565 DCPOMATIC_ASSERT (ra);
566 ra->set_entry_point (ra->entry_point() + trim_start);
567 ra->set_duration (ra->duration() - trim_start - trim_end);
569 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
573 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
574 shared_ptr<dcp::ReelAsset> ra = k->closed_caption ();
575 DCPOMATIC_ASSERT (ra);
576 ra->set_entry_point (ra->entry_point() + trim_start);
577 ra->set_duration (ra->duration() - trim_start - trim_end);
579 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
583 /* Assume that main picture duration is the length of the reel */
584 offset += k->main_picture()->duration ();
// NOTE(review): extraction damage -- this is the interior of Player::pass();
// its signature and several lines (braces, the silence-emission call, the
// _delay trimming) are missing.  Code kept byte-identical, comments only.
/* One step of the player: find whichever source (piece, black-gap or
   silence-gap) is furthest behind, make it emit some data, then flush any
   audio that is known to be complete and any delayed video that is due. */
594 boost::mutex::scoped_lock lm (_mutex);
596 if (!_have_valid_pieces) {
600 if (_playlist->length() == DCPTime()) {
601 /* Special case of an empty Film; just give one black frame */
602 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
606 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
608 shared_ptr<Piece> earliest_content;
609 optional<DCPTime> earliest_time;
611 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Where this piece's decoder currently is, in DCP time */
616 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
617 if (t > i->content->end()) {
621 /* Given two choices at the same time, pick the one with texts so we see it before
624 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
626 earliest_content = i;
640 if (earliest_content) {
/* Black / silence gaps also compete to be the thing that emits next */
644 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
645 earliest_time = _black.position ();
649 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
650 earliest_time = _silent.position ();
/* CONTENT case: let the winning decoder produce some data */
656 earliest_content->done = earliest_content->decoder->pass ();
/* BLACK case: emit one black frame and advance */
659 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
660 _black.set_position (_black.position() + one_video_frame());
/* SILENT case: emit up to one frame's worth of silence */
664 DCPTimePeriod period (_silent.period_at_position());
665 if (_last_audio_time) {
666 /* Sometimes the thing that happened last finishes fractionally before
667 this silence. Bodge the start time of the silence to fix it. I'm
668 not sure if this is the right solution --- maybe the last thing should
669 be padded `forward' rather than this thing padding `back'.
671 period.from = min(period.from, *_last_audio_time);
673 if (period.duration() > one_video_frame()) {
674 period.to = period.from + one_video_frame();
677 _silent.set_position (period.to);
685 /* Emit any audio that is ready */
687 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
688 of our streams, or the position of the _silent.
690 DCPTime pull_to = _film->length ();
691 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
692 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
693 pull_to = i->second.last_push_end;
696 if (!_silent.done() && _silent.position() < pull_to) {
697 pull_to = _silent.position();
700 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
701 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
702 if (_last_audio_time && i->second < *_last_audio_time) {
703 /* This new data comes before the last we emitted (or the last seek); discard it */
704 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
709 } else if (_last_audio_time && i->second > *_last_audio_time) {
710 /* There's a gap between this data and the last we emitted; fill with silence */
711 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
714 emit_audio (i->first, i->second);
/* Flush delayed video frames that are now due */
719 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
720 do_emit_video(i->first, i->second);
// NOTE(review): extraction damage -- stale line-number prefixes and missing
// lines (return types, braces, the BOOST_FOREACH head over the active
// texts); code kept byte-identical, comments only.
/* Return the closed captions active during the single video frame at `time' */
728 Player::closed_captions_for_frame (DCPTime time) const
730 boost::mutex::scoped_lock _lm (_mutex);
731 return _active_texts[TEXT_CLOSED_CAPTION].get (
732 DCPTimePeriod(time, time + DCPTime::from_frames(1, _film->video_frame_rate()))
736 /** @return Open subtitles for the frame at the given time, converted to images */
737 optional<PositionImage>
738 Player::open_subtitles_for_frame (DCPTime time) const
740 list<PositionImage> captions;
741 int const vfr = _film->video_frame_rate();
/* Active open subtitles that should be burnt in for this one-frame period */
745 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
748 /* Bitmap subtitles */
749 list<PositionImage> c = transform_bitmap_texts (j.bitmap);
750 copy (c.begin(), c.end(), back_inserter (captions));
752 /* String subtitles (rendered to an image) */
753 if (!j.string.empty ()) {
754 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
755 copy (s.begin(), s.end(), back_inserter (captions));
/* No subtitles at this time */
759 if (captions.empty ()) {
760 return optional<PositionImage> ();
/* Composite everything into a single image */
763 return merge (captions);
// NOTE(review): extraction damage -- stale line-number prefixes and missing
// lines (braces, `return`s, some PlayerVideo ctor args, the `t` declaration
// before the repeat loop); code kept byte-identical, comments only.
/* Handler for video data arriving from a piece's decoder: drop skipped /
   out-of-range frames, fill any gap since the last emitted frame (2D and 3D
   cases handled separately), then emit this frame (repeated if the frame
   rate change requires it). */
767 Player::video (weak_ptr<Piece> wp, ContentVideo video)
769 shared_ptr<Piece> piece = wp.lock ();
/* If the DCP rate means we skip frames, drop every other one */
774 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
775 if (frc.skip && (video.frame % 2) == 1) {
779 /* Time of the first frame we will emit */
780 DCPTime const time = content_video_to_dcp (piece, video.frame);
782 /* Discard if it's before the content's period or the last accurate seek. We can't discard
783 if it's after the content's period here as in that case we still need to fill any gap between
784 `now' and the end of the content's period.
786 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
790 /* Fill gaps that we discover now that we have some video which needs to be emitted.
791 This is where we need to fill to.
793 DCPTime fill_to = min (time, piece->content->end());
795 if (_last_video_time) {
796 DCPTime fill_from = max (*_last_video_time, piece->content->position());
/* Last frame this piece emitted, to repeat into the gap if we have one */
797 LastVideoMap::const_iterator last = _last_video.find (wp);
798 if (_film->three_d()) {
/* 3D: fill eye-by-eye until we reach both fill_to and the incoming eye */
799 DCPTime j = fill_from;
800 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
801 if (eyes == EYES_BOTH) {
804 while (j < fill_to || eyes != video.eyes) {
805 if (last != _last_video.end()) {
806 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
807 copy->set_eyes (eyes);
808 emit_video (copy, j);
810 emit_video (black_player_video_frame(eyes), j);
/* Time only advances after the right eye of a pair */
812 if (eyes == EYES_RIGHT) {
813 j += one_video_frame();
815 eyes = increment_eyes (eyes);
/* 2D: repeat the last frame (or black) across the gap */
818 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
819 if (last != _last_video.end()) {
820 emit_video (last->second, j);
822 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this frame (crop/fade/scale/conversion from the content) */
828 _last_video[wp].reset (
831 piece->content->video->crop (),
832 piece->content->video->fade (video.frame),
833 piece->content->video->scale().size (
834 piece->content->video, _video_container_size, _film->frame_size ()
836 _video_container_size,
839 piece->content->video->colour_conversion(),
/* Emit the frame, repeated as required by the frame rate change */
846 for (int i = 0; i < frc.repeat; ++i) {
847 if (t < piece->content->end()) {
848 emit_video (_last_video[wp], t);
850 t += one_video_frame ();
// NOTE(review): extraction damage -- stale line-number prefixes and missing
// lines (braces, `return`s, the gain/remap/processor section comments);
// code kept byte-identical, comments only.
/* Handler for audio data arriving from a piece's decoder: trim it to the
   content's period, apply gain / channel remap / the audio processor, then
   push it into the merger and record how far this stream has got. */
855 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
857 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
859 shared_ptr<Piece> piece = wp.lock ();
864 shared_ptr<AudioContent> content = piece->content->audio;
865 DCPOMATIC_ASSERT (content);
867 /* Compute time in the DCP */
868 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
869 /* And the end of this block in the DCP */
870 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
872 /* Remove anything that comes before the start or after the end of the content */
873 if (time < piece->content->position()) {
874 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
876 /* This audio is entirely discarded */
879 content_audio.audio = cut.first;
881 } else if (time > piece->content->end()) {
884 } else if (end > piece->content->end()) {
/* Block overlaps the end of the content: keep only the part inside */
885 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
886 if (remaining_frames == 0) {
889 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
890 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
891 content_audio.audio = cut;
894 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting, if any */
898 if (content->gain() != 0) {
899 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
900 gain->apply_gain (content->gain ());
901 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout */
906 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
910 if (_audio_processor) {
911 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push into the merger and note how far this stream has been pushed */
916 _audio_merger.push (content_audio.audio, time);
917 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
918 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
// NOTE(review): extraction damage -- stale line-number prefixes and missing
// lines (braces, the PlayerText `ps` declaration); code kept byte-identical.
/* Handler for a bitmap subtitle starting: apply the content's offsets and
   scale to its rectangle, then record it as active from the given time. */
922 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
924 shared_ptr<Piece> piece = wp.lock ();
925 shared_ptr<const TextContent> text = wc.lock ();
926 if (!piece || !text) {
930 /* Apply content's subtitle offsets */
931 subtitle.sub.rectangle.x += text->x_offset ();
932 subtitle.sub.rectangle.y += text->y_offset ();
934 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
935 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
936 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
938 /* Apply content's subtitle scale */
939 subtitle.sub.rectangle.width *= text->x_scale ();
940 subtitle.sub.rectangle.height *= text->y_scale ();
943 ps.bitmap.push_back (subtitle.sub);
944 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
/* Mark this text as active from `from'; its end arrives via subtitle_stop() */
946 _active_texts[subtitle.type()].add_from (wc, ps, from);
// NOTE(review): extraction damage -- stale line-number prefixes and missing
// lines (braces, `return`s, the PlayerText `ps` declaration); code kept
// byte-identical, comments only.
/* Handler for a string subtitle starting: apply the content's offsets,
   scale (split into a size change plus an aspect adjustment) and outline
   width, then record it as active from the given time. */
950 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
952 shared_ptr<Piece> piece = wp.lock ();
953 shared_ptr<const TextContent> text = wc.lock ();
954 if (!piece || !text) {
959 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Starts after the content has finished: ignore */
961 if (from > piece->content->end()) {
965 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
966 s.set_h_position (s.h_position() + text->x_offset ());
967 s.set_v_position (s.v_position() + text->y_offset ());
968 float const xs = text->x_scale();
969 float const ys = text->y_scale();
970 float size = s.size();
972 /* Adjust size to express the common part of the scaling;
973 e.g. if xs = ys = 0.5 we scale size by 2.
975 if (xs > 1e-5 && ys > 1e-5) {
976 size *= 1 / min (1 / xs, 1 / ys);
980 /* Then express aspect ratio changes */
981 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
982 s.set_aspect_adjust (xs / ys);
/* The DCP in-time is the (millisecond-accurate) DCP start time */
985 s.set_in (dcp::Time(from.seconds(), 1000));
986 ps.string.push_back (StringText (s, text->outline_width()));
987 ps.add_fonts (text->fonts ());
/* Mark this text as active from `from'; its end arrives via subtitle_stop() */
990 _active_texts[subtitle.type()].add_from (wc, ps, from);
// NOTE(review): extraction damage -- stale line-number prefixes and missing
// lines (braces, `return`s); code kept byte-identical, comments only.
/* Handler for a subtitle/caption ending: close the matching active text and,
   if it is not being burnt into the image, emit it via the Text signal. */
994 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to, TextType type)
/* No matching start was recorded; nothing to stop */
996 if (!_active_texts[type].have (wc)) {
1000 shared_ptr<Piece> piece = wp.lock ();
1001 shared_ptr<const TextContent> text = wc.lock ();
1002 if (!piece || !text) {
1006 DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ends after the content has finished: ignore */
1008 if (dcp_to > piece->content->end()) {
1012 pair<PlayerText, DCPTime> from = _active_texts[type].add_to (wc, dcp_to);
/* Burnt-in texts are composited in do_emit_video() instead of emitted here */
1014 bool const always = (type == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1015 if (text->use() && !always && !text->burn()) {
1016 Text (from.first, type, DCPTimePeriod (from.second, dcp_to));
// NOTE(review): extraction damage -- stale line-number prefixes and missing
// lines (braces, the `if (accurate)`/`else` heads around the _last_* resets,
// presumably a setup_pieces() call); code kept byte-identical, comments only.
/* Seek the whole player to `time'.  `accurate' requests frame-exact output
   (decoders must deliver from exactly `time'); otherwise output may start a
   little early. */
1021 Player::seek (DCPTime time, bool accurate)
1023 boost::mutex::scoped_lock lm (_mutex);
1025 if (!_have_valid_pieces) {
/* Drop any part-assembled 3D eye pairs */
1030 _shuffler->clear ();
1035 if (_audio_processor) {
1036 _audio_processor->flush ();
/* Pending merged audio and active texts are invalid after a seek */
1039 _audio_merger.clear ();
1040 for (int i = 0; i < TEXT_COUNT; ++i) {
1041 _active_texts[i].clear ();
1044 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1045 if (time < i->content->position()) {
1046 /* Before; seek to the start of the content */
1047 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
1049 } else if (i->content->position() <= time && time < i->content->end()) {
1050 /* During; seek to position */
1051 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1054 /* After; this piece is done */
/* Accurate branch: emission restarts exactly at `time' with the left eye */
1060 _last_video_time = time;
1061 _last_video_eyes = EYES_LEFT;
1062 _last_audio_time = time;
/* Inaccurate branch: we don't know where emission will resume */
1064 _last_video_time = optional<DCPTime>();
1065 _last_video_eyes = optional<Eyes>();
1066 _last_audio_time = optional<DCPTime>();
1069 _black.set_position (time);
1070 _silent.set_position (time);
1072 _last_video.clear ();
// NOTE(review): extraction damage -- stale line-number prefixes and missing
// lines (braces, `return`, the _delay.pop_front() presumably); code kept
// byte-identical, comments only.
/* Queue a video frame for emission.  Frames are buffered briefly in _delay
   so subtitles that arrive slightly late can still be composited onto them. */
1076 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1078 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1079 player before the video that requires them.
1081 _delay.push_back (make_pair (pv, time));
/* Advance the emission clock once a frame (or the right eye of a pair) is queued */
1083 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1084 _last_video_time = time + one_video_frame();
1086 _last_video_eyes = increment_eyes (pv->eyes());
/* Keep a couple of frames buffered before actually emitting the oldest */
1088 if (_delay.size() < 3) {
1092 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1094 do_emit_video (to_do.first, to_do.second);
// NOTE(review): extraction damage -- stale line-number prefixes and missing
// lines; in particular the actual Video(...) / Audio(...) signal emissions
// appear to be among the missing lines.  Code kept byte-identical.
/* Actually emit a (previously delayed) video frame: retire expired texts,
   burn in any open subtitles due at this time. */
1098 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
/* Only retire texts once per frame-time, i.e. after the right eye / both-eyes frame */
1100 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1101 for (int i = 0; i < TEXT_COUNT; ++i) {
1102 _active_texts[i].clear_before (time);
/* Composite any burnt-in open subtitles onto the frame */
1106 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1108 pv->set_text (subtitles.get ());
/* Emit audio and advance the audio clock; asserts below guard continuity */
1115 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1117 /* Log if the assert below is about to fail */
1118 if (_last_audio_time && time != *_last_audio_time) {
1119 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1122 /* This audio must follow on from the previous */
1123 DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1125 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
// NOTE(review): extraction damage -- stale line-number prefixes and missing
// lines (braces, `return`, and presumably the `t += block;` loop advance);
// code kept byte-identical, comments only.
/* Emit silence covering `period', in blocks of at most half a second. */
1129 Player::fill_audio (DCPTimePeriod period)
/* Empty period: nothing to do */
1131 if (period.from == period.to) {
1135 DCPOMATIC_ASSERT (period.from < period.to);
1137 DCPTime t = period.from;
1138 while (t < period.to) {
1139 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1140 Frame const samples = block.frames_round(_film->audio_frame_rate());
1142 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1143 silence->make_silent ();
1144 emit_audio (silence, t);
1151 Player::one_video_frame () const
1153 return DCPTime::from_frames (1, _film->video_frame_rate ());
1156 pair<shared_ptr<AudioBuffers>, DCPTime>
1157 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1159 DCPTime const discard_time = discard_to - time;
1160 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1161 Frame remaining_frames = audio->frames() - discard_frames;
1162 if (remaining_frames <= 0) {
1163 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1165 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1166 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1167 return make_pair(cut, time + discard_time);
// NOTE(review): extraction damage -- stale line-number prefixes and missing
// lines (braces, `return`s, a return type and presumably a setup_pieces()
// call); code kept byte-identical, comments only.
/* Set the JPEG2000 decode resolution reduction used for DCP content;
   rebuilds pieces and signals listeners if the value actually changed. */
1171 Player::set_dcp_decode_reduction (optional<int> reduction)
1174 boost::mutex::scoped_lock lm (_mutex);
/* No change: avoid a needless piece rebuild and signal */
1176 if (reduction == _dcp_decode_reduction) {
1180 _dcp_decode_reduction = reduction;
1181 _have_valid_pieces = false;
1184 Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Public overload: map a ContentTime in the given content to DCP time by
   finding its Piece; asserts if the content is not in the playlist. */
1188 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1190 boost::mutex::scoped_lock lm (_mutex);
1192 if (_have_valid_pieces) {
1196 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1197 if (i->content == content) {
1198 return content_time_to_dcp (i, t);
/* Didn't find the content in the playlist */
1202 DCPOMATIC_ASSERT (false);