2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
/* Convenience macro: compose a message and write it to the film's log as a
   GENERAL entry. */
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
/* This file predates C++11 here: boost smart pointers / optional are used
   throughout rather than std equivalents. */
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
/* Identifiers passed to the Changed signal so observers can tell which
   player property was altered.  Values are arbitrary but must be distinct
   from content/film property IDs used elsewhere. */
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Construct a Player for a film/playlist pair.  Subscribes to change signals
   from both, sizes the output container from the film, picks up the film's
   audio processor and performs an initial accurate seek to time zero.
   NOTE(review): this listing has lines elided (parts of the member-initialiser
   list and the body are missing), so code is left byte-identical. */
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 , _playlist (playlist)
90 , _have_valid_pieces (false)
91 , _ignore_video (false)
92 , _ignore_audio (false)
93 , _ignore_text (false)
94 , _always_burn_open_subtitles (false)
96 , _play_referenced (false)
97 , _audio_merger (_film->audio_frame_rate())
/* Invalidate/rebuild our state when the film or playlist changes. */
100 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
101 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
102 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
103 set_video_container_size (_film->frame_size ());
/* Route through film_changed() so the audio processor is cloned the same way
   as on a real property change. */
105 film_changed (Film::AUDIO_PROCESSOR);
/* Accurate seek to the start so we are ready to emit from time 0. */
107 seek (DCPTime (), true);
/* (Re)build the list of Pieces (content + decoder + frame-rate-change) from
   the playlist, wire decoder output signals to our handlers, reset the
   black/silence fillers and per-stream bookkeeping, then mark the pieces
   valid.  NOTE(review): several lines are elided in this listing (e.g. the
   guard around the DCP decoder branch and some closing braces); code is left
   byte-identical. */
116 Player::setup_pieces ()
/* Shuffler re-orders 3D L/R frames that may arrive out of sequence. */
121 _shuffler = new Shuffler();
122 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
124 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files have gone missing. */
126 if (!i->paths_valid ()) {
130 if (_ignore_video && _ignore_audio && i->text.empty()) {
131 /* We're only interested in text and this content has none */
135 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
136 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
139 /* Not something that we can decode; e.g. Atmos content */
/* Honour the various set_ignore_* flags by silencing decoder outputs. */
143 if (decoder->video && _ignore_video) {
144 decoder->video->set_ignore (true);
147 if (decoder->audio && _ignore_audio) {
148 decoder->audio->set_ignore (true);
152 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
153 i->set_ignore (true);
/* DCP content may be played "referenced" (assets re-used rather than
   re-encoded); pass that preference down to the decoder. */
157 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
159 dcp->set_decode_referenced (_play_referenced);
160 if (_play_referenced) {
161 dcp->set_forced_reduction (_dcp_decode_reduction);
165 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
166 _pieces.push_back (piece);
168 if (decoder->video) {
169 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
170 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
171 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
173 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
177 if (decoder->audio) {
178 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Connect each text decoder's start/stop signals; the content is captured
   weakly so a removed piece does not keep it alive. */
181 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
183 while (j != decoder->text.end()) {
184 (*j)->BitmapStart.connect (
185 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
187 (*j)->PlainStart.connect (
188 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
191 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1, _2)
/* Track, per audio stream, the piece it belongs to and how far we have
   pushed audio for it (used by pass() to decide how much is safe to emit). */
198 _stream_states.clear ();
199 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
200 if (i->content->audio) {
201 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
202 _stream_states[j] = StreamState (i, i->content->position ());
/* Periods of the timeline with no video get black; with no audio, silence. */
207 _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
208 _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
210 _last_video_time = DCPTime ();
211 _last_video_eyes = EYES_BOTH;
212 _last_audio_time = DCPTime ();
213 _have_valid_pieces = true;
/* Handle a property change on a piece of playlist content.  Properties in the
   first group change decoding/structure, so the pieces must be rebuilt
   (_have_valid_pieces = false) before re-emitting; properties in the second
   group only change rendering, so Changed is emitted without a rebuild.
   NOTE(review): the if/else framing lines are elided in this listing. */
217 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
219 shared_ptr<Content> c = w.lock ();
/* Structural properties: require new pieces. */
225 property == ContentProperty::POSITION ||
226 property == ContentProperty::LENGTH ||
227 property == ContentProperty::TRIM_START ||
228 property == ContentProperty::TRIM_END ||
229 property == ContentProperty::PATH ||
230 property == VideoContentProperty::FRAME_TYPE ||
231 property == VideoContentProperty::COLOUR_CONVERSION ||
232 property == AudioContentProperty::STREAMS ||
233 property == DCPContentProperty::NEEDS_ASSETS ||
234 property == DCPContentProperty::NEEDS_KDM ||
235 property == TextContentProperty::COLOUR ||
236 property == TextContentProperty::EFFECT ||
237 property == TextContentProperty::EFFECT_COLOUR ||
238 property == FFmpegContentProperty::SUBTITLE_STREAM ||
239 property == FFmpegContentProperty::FILTERS
242 _have_valid_pieces = false;
243 Changed (property, frequent);
/* Presentation-only properties: no rebuild needed. */
246 property == TextContentProperty::LINE_SPACING ||
247 property == TextContentProperty::OUTLINE_WIDTH ||
248 property == TextContentProperty::Y_SCALE ||
249 property == TextContentProperty::FADE_IN ||
250 property == TextContentProperty::FADE_OUT ||
251 property == ContentProperty::VIDEO_FRAME_RATE ||
252 property == TextContentProperty::USE ||
253 property == TextContentProperty::X_OFFSET ||
254 property == TextContentProperty::Y_OFFSET ||
255 property == TextContentProperty::X_SCALE ||
256 property == TextContentProperty::FONTS ||
257 property == TextContentProperty::TYPE ||
258 property == VideoContentProperty::CROP ||
259 property == VideoContentProperty::SCALE ||
260 property == VideoContentProperty::FADE_IN ||
261 property == VideoContentProperty::FADE_OUT
264 Changed (property, frequent);
/* Set the size of the video "container" (the output canvas).  No-op if the
   size is unchanged; otherwise rebuild the cached black frame at the new
   size and notify observers. */
269 Player::set_video_container_size (dcp::Size s)
271 if (s == _video_container_size) {
275 _video_container_size = s;
/* Pre-rendered black frame used for gap filling and the empty-film case. */
277 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
278 _black_image->make_black ();
280 Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Playlist structure changed: invalidate pieces and tell observers. */
284 Player::playlist_changed ()
286 _have_valid_pieces = false;
287 Changed (PlayerProperty::PLAYLIST, false);
/* React to a Film property change that affects our output. */
291 Player::film_changed (Film::Property p)
293 /* Here we should notice Film properties that affect our output, and
294 alert listeners that our output now would be different to how it was
295 last time we were run.
298 if (p == Film::CONTAINER) {
299 Changed (PlayerProperty::FILM_CONTAINER, false);
300 } else if (p == Film::VIDEO_FRAME_RATE) {
301 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
302 so we need new pieces here.
304 _have_valid_pieces = false;
305 Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
306 } else if (p == Film::AUDIO_PROCESSOR) {
307 if (_film->audio_processor ()) {
/* Clone the processor at the film's audio rate for our own use. */
308 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
310 } else if (p == Film::AUDIO_CHANNELS) {
/* Channel count changed: pending merged audio is no longer valid. */
311 _audio_merger.clear ();
/* Convert bitmap subtitles to PositionImages scaled and positioned for the
   current _video_container_size (rectangle coordinates are fractional, hence
   the multiplication by container width/height).
   NOTE(review): the scaling call and return are elided in this listing. */
316 Player::transform_bitmap_texts (list<BitmapText> subs) const
318 list<PositionImage> all;
320 for (list<BitmapText>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
325 /* We will scale the subtitle up to fit _video_container_size */
326 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
332 dcp::YUV_TO_RGB_REC601,
333 i->image->pixel_format (),
/* Position is also expressed as a fraction of the container. */
338 lrint (_video_container_size.width * i->rectangle.x),
339 lrint (_video_container_size.height * i->rectangle.y)
/* @return a PlayerVideo wrapping the cached black image, sized to the
   container, for the given eyes — used to fill gaps and the empty-film case.
   NOTE(review): several constructor-argument lines are elided here. */
348 shared_ptr<PlayerVideo>
349 Player::black_player_video_frame (Eyes eyes) const
351 return shared_ptr<PlayerVideo> (
353 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
356 _video_container_size,
357 _video_container_size,
360 PresetColourConversion::all().front().conversion,
361 boost::weak_ptr<Content>(),
362 boost::optional<Frame>()
/* Convert a DCP timeline time to a frame index within the given piece's
   content, clamping into the content's trimmed range. */
368 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
370 DCPTime s = t - piece->content->position ();
371 s = min (piece->content->length_after_trim(), s);
372 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
374 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
375 then convert that ContentTime to frames at the content's rate. However this fails for
376 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
377 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
379 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
381 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: convert a content frame index to a DCP
   timeline time, accounting for skip/repeat, trim and position. */
385 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
387 /* See comment in dcp_to_content_video */
388 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
389 return d + piece->content->position();
/* Convert a DCP timeline time to a frame count at the film's (resampled)
   audio rate within the given piece, clamped to the trimmed range. */
393 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
395 DCPTime s = t - piece->content->position ();
396 s = min (piece->content->length_after_trim(), s);
397 /* See notes in dcp_to_content_video */
398 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: convert a resampled-audio frame index
   in the piece back to a DCP timeline time. */
402 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
404 /* See comment in dcp_to_content_video */
405 return DCPTime::from_frames (f, _film->audio_frame_rate())
406 - DCPTime (piece->content->trim_start(), piece->frc)
407 + piece->content->position();
/* Convert a DCP timeline time to a ContentTime inside the piece, clamped to
   the trimmed range. */
411 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
413 DCPTime s = t - piece->content->position ();
414 s = min (piece->content->length_after_trim(), s);
415 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Convert a ContentTime inside the piece to a DCP timeline time (clamped at
   zero). */
419 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
421 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by all text content in the playlist (rebuilding
   pieces first if needed — the call is elided in this listing).  Duplicate
   font IDs with different files are a known hazard (see XXX below). */
424 list<shared_ptr<Font> >
425 Player::get_subtitle_fonts ()
427 if (!_have_valid_pieces) {
431 list<shared_ptr<Font> > fonts;
432 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
433 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
434 /* XXX: things may go wrong if there are duplicate font IDs
435 with different font files.
437 list<shared_ptr<Font> > f = j->fonts ();
438 copy (f.begin(), f.end(), back_inserter (fonts));
/* Mode setters.  Each flips a flag; those that affect decoding also set
   _have_valid_pieces = false so pieces are rebuilt before the next pass().
   NOTE(review): function signatures/braces are partly elided in this
   listing (e.g. set_fast's header at original line ~475). */
445 /** Set this player never to produce any video data */
447 Player::set_ignore_video ()
449 _ignore_video = true;
450 _have_valid_pieces = false;
/* Set this player never to produce any audio data. */
454 Player::set_ignore_audio ()
456 _ignore_audio = true;
457 _have_valid_pieces = false;
/* Set this player never to produce any text data. */
461 Player::set_ignore_text ()
466 /** Set the player to always burn open texts into the image regardless of the content settings */
468 Player::set_always_burn_open_subtitles ()
470 _always_burn_open_subtitles = true;
473 /** Sets up the player to be faster, possibly at the expense of quality */
478 _have_valid_pieces = false;
/* Decode referenced DCP content rather than skipping it. */
482 Player::set_play_referenced ()
484 _play_referenced = true;
485 _have_valid_pieces = false;
/* Build the list of reel assets (picture/sound/subtitle/closed-caption) that
   are "referenced" from DCP content in the playlist, i.e. re-used directly
   rather than re-encoded.  Entry point and duration of each asset are
   adjusted for the content's trim; each asset is paired with the DCP period
   it occupies.  NOTE(review): loop framing, the try/catch around decoder
   creation and the `offset` declaration are elided in this listing. */
488 list<ReferencedReelAsset>
489 Player::get_reel_assets ()
491 list<ReferencedReelAsset> a;
493 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can be referenced. */
494 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
499 scoped_ptr<DCPDecoder> decoder;
501 decoder.reset (new DCPDecoder (j, _film->log(), false));
507 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
509 DCPOMATIC_ASSERT (j->video_frame_rate ());
/* cfr = content frame rate, ffr = film frame rate; trims are counted in
   content frames. */
510 double const cfr = j->video_frame_rate().get();
511 Frame const trim_start = j->trim_start().frames_round (cfr);
512 Frame const trim_end = j->trim_end().frames_round (cfr);
513 int const ffr = _film->video_frame_rate ();
515 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
516 if (j->reference_video ()) {
517 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
518 DCPOMATIC_ASSERT (ra);
/* Push the entry point forward and shorten the duration by the trims. */
519 ra->set_entry_point (ra->entry_point() + trim_start);
520 ra->set_duration (ra->duration() - trim_start - trim_end);
522 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
526 if (j->reference_audio ()) {
527 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
528 DCPOMATIC_ASSERT (ra);
529 ra->set_entry_point (ra->entry_point() + trim_start);
530 ra->set_duration (ra->duration() - trim_start - trim_end);
532 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
536 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
537 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
538 DCPOMATIC_ASSERT (ra);
539 ra->set_entry_point (ra->entry_point() + trim_start);
540 ra->set_duration (ra->duration() - trim_start - trim_end);
542 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
546 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
547 shared_ptr<dcp::ReelAsset> ra = k->closed_caption ();
548 DCPOMATIC_ASSERT (ra);
549 ra->set_entry_point (ra->entry_point() + trim_start);
550 ra->set_duration (ra->duration() - trim_start - trim_end);
552 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
556 /* Assume that main picture duration is the length of the reel */
557 offset += k->main_picture()->duration ();
/* One step of the player's main loop (Player::pass — the function header is
   elided in this listing): find whichever source (a piece's decoder, the
   black filler or the silence filler) is furthest behind, make it emit some
   data, then flush any audio that is definitely complete and any delayed
   video.  Code is left byte-identical; many framing lines are elided. */
567 if (!_have_valid_pieces) {
571 if (_playlist->length() == DCPTime()) {
572 /* Special case of an empty Film; just give one black frame */
573 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
577 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
579 shared_ptr<Piece> earliest_content;
580 optional<DCPTime> earliest_time;
582 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
587 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
588 if (t > i->content->end()) {
592 /* Given two choices at the same time, pick the one with texts so we see it before
595 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
597 earliest_content = i;
611 if (earliest_content) {
/* Black/silence fillers may be even further behind than any content. */
615 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
616 earliest_time = _black.position ();
620 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
621 earliest_time = _silent.position ();
/* Drive the chosen source one step forward. */
627 earliest_content->done = earliest_content->decoder->pass ();
630 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
631 _black.set_position (_black.position() + one_video_frame());
635 DCPTimePeriod period (_silent.period_at_position());
636 if (_last_audio_time) {
637 /* Sometimes the thing that happened last finishes fractionally before
638 this silence. Bodge the start time of the silence to fix it. I'm
639 not sure if this is the right solution --- maybe the last thing should
640 be padded `forward' rather than this thing padding `back'.
642 period.from = min(period.from, *_last_audio_time);
/* Emit at most one video frame's worth of silence per pass. */
644 if (period.duration() > one_video_frame()) {
645 period.to = period.from + one_video_frame();
648 _silent.set_position (period.to);
656 /* Emit any audio that is ready */
658 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
659 of our streams, or the position of the _silent.
661 DCPTime pull_to = _film->length ();
662 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
663 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
664 pull_to = i->second.last_push_end;
667 if (!_silent.done() && _silent.position() < pull_to) {
668 pull_to = _silent.position();
671 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
672 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
673 if (_last_audio_time && i->second < *_last_audio_time) {
674 /* This new data comes before the last we emitted (or the last seek); discard it */
675 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
680 } else if (_last_audio_time && i->second > *_last_audio_time) {
681 /* There's a gap between this data and the last we emitted; fill with silence */
682 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
685 emit_audio (i->first, i->second);
/* Flush any video still sitting in the small re-ordering delay queue. */
690 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
691 do_emit_video(i->first, i->second);
/* @return the active closed captions overlapping the one-frame period
   starting at the given time. */
699 Player::closed_captions_for_frame (DCPTime time) const
701 return _active_texts[TEXT_CLOSED_CAPTION].get (
702 DCPTimePeriod(time, time + DCPTime::from_frames(1, _film->video_frame_rate()))
/* Gather open subtitles active during this frame — bitmap subtitles are
   scaled/positioned, string subtitles rendered — and merge them into one
   PositionImage (or none).  NOTE(review): the loop framing over the active
   texts is elided in this listing. */
706 /** @return Open subtitles for the frame at the given time, converted to images */
707 optional<PositionImage>
708 Player::open_subtitles_for_frame (DCPTime time) const
710 list<PositionImage> captions;
711 int const vfr = _film->video_frame_rate();
715 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
718 /* Image subtitles */
719 list<PositionImage> c = transform_bitmap_texts (j.image);
720 copy (c.begin(), c.end(), back_inserter (captions));
722 /* Text subtitles (rendered to an image) */
723 if (!j.text.empty ()) {
724 list<PositionImage> s = render_text (j.text, j.fonts, _video_container_size, time, vfr);
725 copy (s.begin(), s.end(), back_inserter (captions));
729 if (captions.empty ()) {
730 return optional<PositionImage> ();
733 return merge (captions);
/* Handle a decoded video frame from a piece: drop skipped/early frames, fill
   any gap since the last emitted video (repeating the previous frame or
   emitting black, with 3D eye handling), then emit the frame (repeated as
   required by the frame-rate change).  NOTE(review): various framing lines
   and part of the PlayerVideo construction are elided in this listing. */
737 Player::video (weak_ptr<Piece> wp, ContentVideo video)
739 shared_ptr<Piece> piece = wp.lock ();
/* frc.skip means every other content frame is dropped. */
744 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
745 if (frc.skip && (video.frame % 2) == 1) {
749 /* Time of the first frame we will emit */
750 DCPTime const time = content_video_to_dcp (piece, video.frame);
752 /* Discard if it's before the content's period or the last accurate seek. We can't discard
753 if it's after the content's period here as in that case we still need to fill any gap between
754 `now' and the end of the content's period.
756 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
760 /* Fill gaps that we discover now that we have some video which needs to be emitted.
761 This is where we need to fill to.
763 DCPTime fill_to = min (time, piece->content->end());
765 if (_last_video_time) {
766 DCPTime fill_from = max (*_last_video_time, piece->content->position());
767 LastVideoMap::const_iterator last = _last_video.find (wp);
768 if (_film->three_d()) {
769 DCPTime j = fill_from;
770 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
771 if (eyes == EYES_BOTH) {
/* Fill eye-by-eye until we reach both the target time and the eye that the
   incoming frame is for. */
774 while (j < fill_to || eyes != video.eyes) {
775 if (last != _last_video.end()) {
776 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
777 copy->set_eyes (eyes);
778 emit_video (copy, j);
780 emit_video (black_player_video_frame(eyes), j);
/* Time only advances after the RIGHT eye of a pair. */
782 if (eyes == EYES_RIGHT) {
783 j += one_video_frame();
785 eyes = increment_eyes (eyes);
/* 2D: fill a frame at a time, repeating the last frame or using black. */
788 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
789 if (last != _last_video.end()) {
790 emit_video (last->second, j);
792 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Remember this frame per-piece so future gaps can repeat it. */
798 _last_video[wp].reset (
801 piece->content->video->crop (),
802 piece->content->video->fade (video.frame),
803 piece->content->video->scale().size (
804 piece->content->video, _video_container_size, _film->frame_size ()
806 _video_container_size,
809 piece->content->video->colour_conversion(),
/* frc.repeat > 1 means each content frame is emitted multiple times. */
816 for (int i = 0; i < frc.repeat; ++i) {
817 if (t < piece->content->end()) {
818 emit_video (_last_video[wp], t);
820 t += one_video_frame ();
/* Handle decoded audio from a piece: position it on the DCP timeline, trim
   anything outside the content's period, apply gain, remap to the film's
   channel layout, run the optional audio processor, then push into the
   merger and record how far this stream has been pushed.
   NOTE(review): some framing lines (early returns, closing braces) are
   elided in this listing. */
825 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
827 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
829 shared_ptr<Piece> piece = wp.lock ();
834 shared_ptr<AudioContent> content = piece->content->audio;
835 DCPOMATIC_ASSERT (content);
837 /* Compute time in the DCP */
838 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
839 /* And the end of this block in the DCP */
840 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
842 /* Remove anything that comes before the start or after the end of the content */
843 if (time < piece->content->position()) {
844 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
846 /* This audio is entirely discarded */
849 content_audio.audio = cut.first;
851 } else if (time > piece->content->end()) {
854 } else if (end > piece->content->end()) {
/* Block straddles the end of the content: keep only the in-range part. */
855 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
856 if (remaining_frames == 0) {
859 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
860 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
861 content_audio.audio = cut;
864 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting (dB) if non-zero. */
868 if (content->gain() != 0) {
869 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
870 gain->apply_gain (content->gain ());
871 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout. */
876 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
880 if (_audio_processor) {
881 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push into the merger and note how far this stream now extends. */
886 _audio_merger.push (content_audio.audio, time);
887 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
888 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handle the start of a bitmap subtitle: apply the content's offset and
   scale settings to the subtitle rectangle (with a corrective translation to
   keep it centred), then register it as active from the corresponding DCP
   time.  NOTE(review): the PlayerText declaration line is elided. */
892 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
894 shared_ptr<Piece> piece = wp.lock ();
895 shared_ptr<const TextContent> text = wc.lock ();
896 if (!piece || !text) {
900 /* Apply content's subtitle offsets */
901 subtitle.sub.rectangle.x += text->x_offset ();
902 subtitle.sub.rectangle.y += text->y_offset ();
904 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
905 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
906 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
908 /* Apply content's subtitle scale */
909 subtitle.sub.rectangle.width *= text->x_scale ();
910 subtitle.sub.rectangle.height *= text->y_scale ();
913 ps.image.push_back (subtitle.sub);
914 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
916 _active_texts[subtitle.type()].add_from (wc, ps, from);
/* Handle the start of a string (plain-text) subtitle: apply the content's
   offset/scale settings to each SubtitleString, set its in-time, and
   register the collection as active from the corresponding DCP time.
   NOTE(review): the PlayerText declaration line is elided. */
920 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
922 shared_ptr<Piece> piece = wp.lock ();
923 shared_ptr<const TextContent> text = wc.lock ();
924 if (!piece || !text) {
929 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore subtitles that start after the content has ended. */
931 if (from > piece->content->end()) {
935 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
936 s.set_h_position (s.h_position() + text->x_offset ());
937 s.set_v_position (s.v_position() + text->y_offset ());
938 float const xs = text->x_scale();
939 float const ys = text->y_scale();
940 float size = s.size();
942 /* Adjust size to express the common part of the scaling;
943 e.g. if xs = ys = 0.5 we scale size by 2.
945 if (xs > 1e-5 && ys > 1e-5) {
946 size *= 1 / min (1 / xs, 1 / ys);
950 /* Then express aspect ratio changes */
951 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
952 s.set_aspect_adjust (xs / ys);
/* dcp::Time with editable rate 1000 (milliseconds). */
955 s.set_in (dcp::Time(from.seconds(), 1000));
956 ps.text.push_back (StringText (s, text->outline_width()));
957 ps.add_fonts (text->fonts ());
960 _active_texts[subtitle.type()].add_from (wc, ps, from);
/* Handle the end of a subtitle: close the active period for this text
   content and, if the subtitle is not being burnt into the image, emit it
   via the Text signal with its full period. */
964 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to, TextType type)
966 if (!_active_texts[type].have (wc)) {
970 shared_ptr<Piece> piece = wp.lock ();
971 shared_ptr<const TextContent> text = wc.lock ();
972 if (!piece || !text) {
976 DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ignore stops that fall after the content has ended. */
978 if (dcp_to > piece->content->end()) {
982 pair<PlayerText, DCPTime> from = _active_texts[type].add_to (wc, dcp_to);
/* Burnt subtitles are composited onto video in do_emit_video instead. */
984 bool const always = (type == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
985 if (text->use() && !always && !text->burn()) {
986 Text (from.first, type, DCPTimePeriod (from.second, dcp_to));
/* Seek to the given DCP time.  Flushes the audio processor, clears pending
   merged audio and active texts, seeks each piece's decoder to the matching
   content time, and resets the last-emitted bookkeeping (exactly, for an
   accurate seek; cleared, for an inaccurate one).  NOTE(review): several
   framing lines (the accurate/inaccurate branch header, some braces) are
   elided in this listing. */
991 Player::seek (DCPTime time, bool accurate)
993 if (!_have_valid_pieces) {
1003 if (_audio_processor) {
1004 _audio_processor->flush ();
1007 _audio_merger.clear ();
1008 for (int i = 0; i < TEXT_COUNT; ++i) {
1009 _active_texts[i].clear ();
1012 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1013 if (time < i->content->position()) {
1014 /* Before; seek to the start of the content */
1015 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
1017 } else if (i->content->position() <= time && time < i->content->end()) {
1018 /* During; seek to position */
1019 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1022 /* After; this piece is done */
/* Accurate seek: we know exactly where the next video/audio should start. */
1028 _last_video_time = time;
1029 _last_video_eyes = EYES_LEFT;
1030 _last_audio_time = time;
/* Inaccurate seek: positions unknown until data arrives. */
1032 _last_video_time = optional<DCPTime>();
1033 _last_video_eyes = optional<Eyes>();
1034 _last_audio_time = optional<DCPTime>();
1037 _black.set_position (time);
1038 _silent.set_position (time);
1040 _last_video.clear ();
/* Queue a video frame for emission.  Frames pass through a small delay
   buffer (so subtitles have time to arrive before the frames that need
   them); bookkeeping of the last video time/eyes is updated immediately.
   NOTE(review): the pop of the delay queue is elided in this listing. */
1044 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1046 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1047 player before the video that requires them.
1049 _delay.push_back (make_pair (pv, time));
/* Advance our notion of time only once per stereo pair (or per 2D frame). */
1051 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1052 _last_video_time = time + one_video_frame();
1054 _last_video_eyes = increment_eyes (pv->eyes());
/* Hold up to two frames in the delay buffer before emitting. */
1056 if (_delay.size() < 3) {
1060 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1062 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: expire active texts that ended before this
   time, burn any open subtitles for the frame into the PlayerVideo, then
   (in elided code) fire the Video signal. */
1066 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1068 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1069 for (int i = 0; i < TEXT_COUNT; ++i) {
1070 _active_texts[i].clear_before (time);
1074 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1076 pv->set_text (subtitles.get ());
/* Emit an audio block.  Audio must be strictly contiguous: this block must
   start exactly where the last one ended (asserted below, with a log entry
   first so failures can be diagnosed).  The Audio signal emission itself is
   elided in this listing. */
1083 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1085 /* Log if the assert below is about to fail */
1086 if (_last_audio_time && time != *_last_audio_time) {
1087 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1090 /* This audio must follow on from the previous */
1091 DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1093 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Fill a period of the output with silence, emitted in blocks of at most
   half a second. */
1097 Player::fill_audio (DCPTimePeriod period)
1099 if (period.from == period.to) {
1103 DCPOMATIC_ASSERT (period.from < period.to);
1105 DCPTime t = period.from;
1106 while (t < period.to) {
1107 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1108 Frame const samples = block.frames_round(_film->audio_frame_rate());
/* NOTE(review): a `samples > 0` guard presumably exists in elided lines. */
1110 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1111 silence->make_silent ();
1112 emit_audio (silence, t);
/* @return the DCPTime duration of a single video frame at the film's rate. */
1119 Player::one_video_frame () const
1121 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Discard the part of `audio` (positioned at `time`) that falls before
   `discard_to`.  @return the remaining audio and its new start time, or a
   null buffer if everything was discarded. */
1124 pair<shared_ptr<AudioBuffers>, DCPTime>
1125 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1127 DCPTime const discard_time = discard_to - time;
1128 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1129 Frame remaining_frames = audio->frames() - discard_frames;
1130 if (remaining_frames <= 0) {
1131 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1133 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1134 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1135 return make_pair(cut, time + discard_time);
/* Set an optional resolution reduction for DCP decoding (e.g. for faster
   preview).  No-op if unchanged; otherwise invalidates the pieces and
   notifies observers. */
1139 Player::set_dcp_decode_reduction (optional<int> reduction)
1141 if (reduction == _dcp_decode_reduction) {
1145 _dcp_decode_reduction = reduction;
1146 _have_valid_pieces = false;
1147 Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Public overload: convert a ContentTime within the given Content to a DCP
   timeline time by locating its Piece.  Asserts if the content is not in the
   playlist.  NOTE(review): the setup_pieces() call inside the
   _have_valid_pieces branch appears to be elided in this listing. */
1151 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1153 if (_have_valid_pieces) {
1157 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1158 if (i->content == content) {
1159 return content_time_to_dcp (i, t);
/* Must be a content which has been added to the playlist. */
1163 DCPOMATIC_ASSERT (false);