2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
/* Property identifiers emitted via the Player's Changed signal.  Values sit in
   the 700+ range, presumably to avoid clashing with Film/Content property
   enums — TODO confirm against the rest of the project. */
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Construct a Player for the given film and playlist: wire up change
   signals from both, size the video container to the film's frame size,
   pick up any audio processor, and seek to the start.
   NOTE(review): this listing is elided — the `_film' member initialiser and
   some braces are missing between the visible lines. */
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 , _playlist (playlist)
90 , _have_valid_pieces (false)
91 , _ignore_video (false)
92 , _ignore_text (false)
93 , _always_burn_open_subtitles (false)
95 , _play_referenced (false)
96 , _audio_merger (_film->audio_frame_rate())
/* Re-examine our state when the film or playlist changes under us */
99 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
100 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
101 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
102 set_video_container_size (_film->frame_size ());
/* Force initial set-up of the audio processor, if the film has one */
104 film_changed (Film::AUDIO_PROCESSOR);
/* Accurate seek to the very start so decode begins from time zero */
106 seek (DCPTime (), true);
/* (Re)build the list of Pieces (content + decoder + frame-rate-change) from
   the playlist, connect decoder outputs to our handlers, and reset playback
   state.  Called whenever _have_valid_pieces is false.
   NOTE(review): elided listing — several closing braces and else-branches
   are missing between the visible lines. */
115 Player::setup_pieces ()
120 _shuffler = new Shuffler();
121 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
123 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files are missing/invalid */
125 if (!i->paths_valid ()) {
129 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
130 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
133 /* Not something that we can decode; e.g. Atmos content */
137 if (decoder->video && _ignore_video) {
138 decoder->video->set_ignore (true);
142 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
143 i->set_ignore (true);
/* DCP content may reference assets from another DCP rather than decode them */
147 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
149 dcp->set_decode_referenced (_play_referenced);
150 if (_play_referenced) {
151 dcp->set_forced_reduction (_dcp_decode_reduction);
155 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
156 _pieces.push_back (piece);
158 if (decoder->video) {
159 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
160 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
161 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
163 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
167 if (decoder->audio) {
168 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Connect each text decoder's start/stop signals to our subtitle handlers */
171 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
173 while (j != decoder->text.end()) {
174 (*j)->BitmapStart.connect (
175 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
177 (*j)->PlainStart.connect (
178 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
181 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1, _2)
/* Track per-audio-stream push positions so pass() knows how far it can pull */
188 _stream_states.clear ();
189 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
190 if (i->content->audio) {
191 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
192 _stream_states[j] = StreamState (i, i->content->position ());
/* Gaps in the timeline which we must fill with black video / silent audio */
197 _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
198 _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
200 _last_video_time = DCPTime ();
201 _last_video_eyes = EYES_BOTH;
202 _last_audio_time = DCPTime ();
203 _have_valid_pieces = true;
/* Slot called when a property of some playlist content changes.
   @param w        the Content that changed (weak; may have expired)
   @param property which property changed
   @param frequent true if this change is one of a rapid stream (re-emitted)
   The first group of properties invalidates our Pieces (decoders must be
   rebuilt); the second group only changes output so we just re-emit Changed. */
207 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
209 shared_ptr<Content> c = w.lock ();
/* Properties that require setup_pieces() to run again */
215 property == ContentProperty::POSITION ||
216 property == ContentProperty::LENGTH ||
217 property == ContentProperty::TRIM_START ||
218 property == ContentProperty::TRIM_END ||
219 property == ContentProperty::PATH ||
220 property == VideoContentProperty::FRAME_TYPE ||
221 property == VideoContentProperty::COLOUR_CONVERSION ||
222 property == AudioContentProperty::STREAMS ||
223 property == DCPContentProperty::NEEDS_ASSETS ||
224 property == DCPContentProperty::NEEDS_KDM ||
225 property == TextContentProperty::COLOUR ||
226 property == TextContentProperty::EFFECT ||
227 property == TextContentProperty::EFFECT_COLOUR ||
228 property == FFmpegContentProperty::SUBTITLE_STREAM ||
229 property == FFmpegContentProperty::FILTERS
232 _have_valid_pieces = false;
233 Changed (property, frequent);
/* Properties that merely alter the output; no piece rebuild needed */
236 property == TextContentProperty::LINE_SPACING ||
237 property == TextContentProperty::OUTLINE_WIDTH ||
238 property == TextContentProperty::Y_SCALE ||
239 property == TextContentProperty::FADE_IN ||
240 property == TextContentProperty::FADE_OUT ||
241 property == ContentProperty::VIDEO_FRAME_RATE ||
242 property == TextContentProperty::USE ||
243 property == TextContentProperty::X_OFFSET ||
244 property == TextContentProperty::Y_OFFSET ||
245 property == TextContentProperty::X_SCALE ||
246 property == TextContentProperty::FONTS ||
247 property == TextContentProperty::TYPE ||
248 property == VideoContentProperty::CROP ||
249 property == VideoContentProperty::SCALE ||
250 property == VideoContentProperty::FADE_IN ||
251 property == VideoContentProperty::FADE_OUT
254 Changed (property, frequent);
/* Set the size of the video "container" we emit into; rebuilds the cached
   black frame at the new size.  No-op (and no signal) if unchanged. */
259 Player::set_video_container_size (dcp::Size s)
261 if (s == _video_container_size) {
265 _video_container_size = s;
/* Pre-render a black frame at the container size for gap filling */
267 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
268 _black_image->make_black ();
270 Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Slot: the playlist's content list changed — invalidate pieces and notify. */
274 Player::playlist_changed ()
276 _have_valid_pieces = false;
277 Changed (PlayerProperty::PLAYLIST, false);
/* Slot called when a Film property changes.  Only properties that affect
   the Player's output are handled. */
281 Player::film_changed (Film::Property p)
283 /* Here we should notice Film properties that affect our output, and
284 alert listeners that our output now would be different to how it was
285 last time we were run.
288 if (p == Film::CONTAINER) {
289 Changed (PlayerProperty::FILM_CONTAINER, false);
290 } else if (p == Film::VIDEO_FRAME_RATE) {
291 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
292 so we need new pieces here.
294 _have_valid_pieces = false;
295 Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
296 } else if (p == Film::AUDIO_PROCESSOR) {
297 if (_film->audio_processor ()) {
/* Clone the film's processor at our working sample rate */
298 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
300 } else if (p == Film::AUDIO_CHANNELS) {
301 _audio_merger.clear ();
/* Convert bitmap subtitles (with fractional rectangles) into PositionImages
   scaled and positioned for _video_container_size.
   NOTE(review): elided listing — the scale call and return are missing here. */
306 Player::transform_bitmap_texts (list<BitmapText> subs) const
308 list<PositionImage> all;
310 for (list<BitmapText>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
315 /* We will scale the subtitle up to fit _video_container_size */
316 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
322 dcp::YUV_TO_RGB_REC601,
323 i->image->pixel_format (),
/* Fractional rectangle position -> integer pixel position in the container */
328 lrint (_video_container_size.width * i->rectangle.x),
329 lrint (_video_container_size.height * i->rectangle.y)
/* Make a PlayerVideo wrapping the cached black image, for the given eyes.
   Used to fill timeline gaps.  The empty weak_ptr/optional mark it as not
   coming from any real content. */
338 shared_ptr<PlayerVideo>
339 Player::black_player_video_frame (Eyes eyes) const
341 return shared_ptr<PlayerVideo> (
343 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
346 _video_container_size,
347 _video_container_size,
350 PresetColourConversion::all().front().conversion,
351 boost::weak_ptr<Content>(),
352 boost::optional<Frame>()
/* Convert a DCP time to a video frame index within the given piece's content,
   clamping to the content's trimmed extent. */
358 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
360 DCPTime s = t - piece->content->position ();
361 s = min (piece->content->length_after_trim(), s);
362 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
364 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
365 then convert that ContentTime to frames at the content's rate. However this fails for
366 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
367 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
369 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
371 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: content video frame index -> DCP time. */
375 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
377 /* See comment in dcp_to_content_video */
378 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
379 return d + piece->content->position();
/* Convert a DCP time to a frame count at the film's audio sample rate,
   within the given piece, clamped to the trimmed extent. */
383 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
385 DCPTime s = t - piece->content->position ();
386 s = min (piece->content->length_after_trim(), s);
387 /* See notes in dcp_to_content_video */
388 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: audio frame (at film rate) -> DCP time. */
392 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
394 /* See comment in dcp_to_content_video */
395 return DCPTime::from_frames (f, _film->audio_frame_rate())
396 - DCPTime (piece->content->trim_start(), piece->frc)
397 + piece->content->position();
/* Convert a DCP time to a ContentTime within the given piece, accounting
   for position and trim; clamped to be non-negative. */
401 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
403 DCPTime s = t - piece->content->position ();
404 s = min (piece->content->length_after_trim(), s);
405 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Inverse of dcp_to_content_time: ContentTime within a piece -> DCP time. */
409 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
411 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Gather every font used by any text content in the playlist (ensuring the
   pieces are set up first).  Duplicates are not filtered — see XXX below. */
414 list<shared_ptr<Font> >
415 Player::get_subtitle_fonts ()
417 if (!_have_valid_pieces) {
421 list<shared_ptr<Font> > fonts;
422 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
423 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
424 /* XXX: things may go wrong if there are duplicate font IDs
425 with different font files.
427 list<shared_ptr<Font> > f = j->fonts ();
428 copy (f.begin(), f.end(), back_inserter (fonts));
435 /** Set this player never to produce any video data */
437 Player::set_ignore_video ()
439 _ignore_video = true;
/* Set this player never to produce any text (subtitle/caption) data.
   NOTE(review): body elided from this listing; presumably sets _ignore_text —
   confirm against the full source. */
443 Player::set_ignore_text ()
448 /** Set the player to always burn open texts into the image regardless of the content settings */
450 Player::set_always_burn_open_subtitles ()
452 _always_burn_open_subtitles = true;
455 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function signature is elided from this listing; the
   visible body invalidates the pieces so decoders are rebuilt (with _fast,
   presumably — confirm against the full source). */
460 _have_valid_pieces = false;
/* Make the player decode content that a DCP merely references (rather than
   skipping it); requires the pieces to be rebuilt. */
464 Player::set_play_referenced ()
466 _play_referenced = true;
467 _have_valid_pieces = false;
/* Collect the reel assets (picture/sound/subtitle/closed-caption) that DCP
   content in the playlist asks us to reference rather than re-encode.  Each
   asset has its entry point / duration adjusted for the content's trims, and
   is paired with the DCP time period it occupies in the film.
   NOTE(review): elided listing — the error handling around the DCPDecoder
   construction and the emplacement into `a' are missing between lines. */
470 list<ReferencedReelAsset>
471 Player::get_reel_assets ()
473 list<ReferencedReelAsset> a;
475 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
476 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
481 scoped_ptr<DCPDecoder> decoder;
483 decoder.reset (new DCPDecoder (j, _film->log(), false));
489 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
491 DCPOMATIC_ASSERT (j->video_frame_rate ());
492 double const cfr = j->video_frame_rate().get();
/* Trims expressed as frame counts at the content frame rate */
493 Frame const trim_start = j->trim_start().frames_round (cfr);
494 Frame const trim_end = j->trim_end().frames_round (cfr);
495 int const ffr = _film->video_frame_rate ();
/* Where this reel starts in the film, offset by the previous reels */
497 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
498 if (j->reference_video ()) {
499 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
500 DCPOMATIC_ASSERT (ra);
501 ra->set_entry_point (ra->entry_point() + trim_start);
502 ra->set_duration (ra->duration() - trim_start - trim_end);
504 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
508 if (j->reference_audio ()) {
509 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
510 DCPOMATIC_ASSERT (ra);
511 ra->set_entry_point (ra->entry_point() + trim_start);
512 ra->set_duration (ra->duration() - trim_start - trim_end);
514 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
518 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
519 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
520 DCPOMATIC_ASSERT (ra);
521 ra->set_entry_point (ra->entry_point() + trim_start);
522 ra->set_duration (ra->duration() - trim_start - trim_end);
524 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
528 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
529 shared_ptr<dcp::ReelAsset> ra = k->closed_caption ();
530 DCPOMATIC_ASSERT (ra);
531 ra->set_entry_point (ra->entry_point() + trim_start);
532 ra->set_duration (ra->duration() - trim_start - trim_end);
534 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
538 /* Assume that main picture duration is the length of the reel */
539 offset += k->main_picture()->duration ();
/* One step of the playback pump (the Player::pass signature is elided from
   this listing): pick whichever source — a piece's decoder, the black filler
   or the silent filler — is farthest behind, make it emit some data, then
   flush any audio that is definitely complete and any delayed video.
   NOTE(review): elided listing — the switch over which source was earliest
   and several braces are missing between the visible lines. */
549 if (!_have_valid_pieces) {
553 if (_playlist->length() == DCPTime()) {
554 /* Special case of an empty Film; just give one black frame */
555 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
559 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
561 shared_ptr<Piece> earliest_content;
562 optional<DCPTime> earliest_time;
564 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
569 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
570 if (t > i->content->end()) {
574 /* Given two choices at the same time, pick the one with texts so we see it before
577 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
579 earliest_content = i;
593 if (earliest_content) {
/* The black/silent fillers compete with content for "farthest behind" */
597 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
598 earliest_time = _black.position ();
602 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
603 earliest_time = _silent.position ();
609 earliest_content->done = earliest_content->decoder->pass ();
612 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
613 _black.set_position (_black.position() + one_video_frame());
617 DCPTimePeriod period (_silent.period_at_position());
618 if (_last_audio_time) {
619 /* Sometimes the thing that happened last finishes fractionally before
620 this silence. Bodge the start time of the silence to fix it. I'm
621 not sure if this is the right solution --- maybe the last thing should
622 be padded `forward' rather than this thing padding `back'.
624 period.from = min(period.from, *_last_audio_time);
/* Emit at most one video frame's worth of silence per pass */
626 if (period.duration() > one_video_frame()) {
627 period.to = period.from + one_video_frame();
630 _silent.set_position (period.to);
638 /* Emit any audio that is ready */
640 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
641 of our streams, or the position of the _silent.
643 DCPTime pull_to = _film->length ();
644 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
645 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
646 pull_to = i->second.last_push_end;
649 if (!_silent.done() && _silent.position() < pull_to) {
650 pull_to = _silent.position();
653 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
654 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
655 if (_last_audio_time && i->second < *_last_audio_time) {
656 /* This new data comes before the last we emitted (or the last seek); discard it */
657 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
662 } else if (_last_audio_time && i->second > *_last_audio_time) {
663 /* There's a gap between this data and the last we emitted; fill with silence */
664 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
667 emit_audio (i->first, i->second);
/* Flush video frames held back in _delay (see emit_video) */
672 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
673 do_emit_video(i->first, i->second);
/* @return the active closed captions covering the one-video-frame period
   starting at the given DCP time. */
681 Player::closed_captions_for_frame (DCPTime time) const
683 return _active_texts[TEXT_CLOSED_CAPTION].get (
684 DCPTimePeriod(time, time + DCPTime::from_frames(1, _film->video_frame_rate()))
688 /** @return Open subtitles for the frame at the given time, converted to images */
689 optional<PositionImage>
690 Player::open_subtitles_for_frame (DCPTime time) const
692 list<PositionImage> captions;
693 int const vfr = _film->video_frame_rate();
/* Only texts that are (or must always be) burnt into the image */
697 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
700 /* Image subtitles */
701 list<PositionImage> c = transform_bitmap_texts (j.image);
702 copy (c.begin(), c.end(), back_inserter (captions));
704 /* Text subtitles (rendered to an image) */
705 if (!j.text.empty ()) {
706 list<PositionImage> s = render_text (j.text, j.fonts, _video_container_size, time, vfr);
707 copy (s.begin(), s.end(), back_inserter (captions));
711 if (captions.empty ()) {
712 return optional<PositionImage> ();
/* Combine all the caption images into one */
715 return merge (captions);
/* Handler for video data arriving from a piece's decoder.  Converts the
   content frame to DCP time, discards early/duplicate frames, fills any
   gap since the last emitted frame (repeating the previous frame or using
   black), then emits the new frame (repeated as the FrameRateChange needs).
   NOTE(review): elided listing — the early-return branches and the full
   PlayerVideo construction are partially missing. */
719 Player::video (weak_ptr<Piece> wp, ContentVideo video)
721 shared_ptr<Piece> piece = wp.lock ();
726 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
/* When skipping (content faster than DCP) drop every other frame */
727 if (frc.skip && (video.frame % 2) == 1) {
731 /* Time of the first frame we will emit */
732 DCPTime const time = content_video_to_dcp (piece, video.frame);
734 /* Discard if it's before the content's period or the last accurate seek. We can't discard
735 if it's after the content's period here as in that case we still need to fill any gap between
736 `now' and the end of the content's period.
738 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
742 /* Fill gaps that we discover now that we have some video which needs to be emitted.
743 This is where we need to fill to.
745 DCPTime fill_to = min (time, piece->content->end());
747 if (_last_video_time) {
748 DCPTime fill_from = max (*_last_video_time, piece->content->position());
749 LastVideoMap::const_iterator last = _last_video.find (wp);
750 if (_film->three_d()) {
751 DCPTime j = fill_from;
752 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
753 if (eyes == EYES_BOTH) {
/* Fill eye-by-eye until we reach both fill_to and the incoming frame's eye */
756 while (j < fill_to || eyes != video.eyes) {
757 if (last != _last_video.end()) {
758 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
759 copy->set_eyes (eyes);
760 emit_video (copy, j);
762 emit_video (black_player_video_frame(eyes), j);
/* Time only advances after the right eye of a stereo pair */
764 if (eyes == EYES_RIGHT) {
765 j += one_video_frame();
767 eyes = increment_eyes (eyes);
/* 2D: fill frame-by-frame with the last frame, or black */
770 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
771 if (last != _last_video.end()) {
772 emit_video (last->second, j);
774 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this decoded frame, remembering it per-piece
   so future gaps can repeat it */
780 _last_video[wp].reset (
783 piece->content->video->crop (),
784 piece->content->video->fade (video.frame),
785 piece->content->video->scale().size (
786 piece->content->video, _video_container_size, _film->frame_size ()
788 _video_container_size,
791 piece->content->video->colour_conversion(),
/* Emit the frame, repeated as required by the frame-rate change */
798 for (int i = 0; i < frc.repeat; ++i) {
799 if (t < piece->content->end()) {
800 emit_video (_last_video[wp], t);
802 t += one_video_frame ();
/* Handler for audio data arriving from a piece's decoder.  Maps the block
   into DCP time, trims anything outside the content's period, applies gain,
   channel remapping and any audio processor, then pushes it to the merger
   and records how far this stream has been pushed. */
807 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
809 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
811 shared_ptr<Piece> piece = wp.lock ();
816 shared_ptr<AudioContent> content = piece->content->audio;
817 DCPOMATIC_ASSERT (content);
819 /* Compute time in the DCP */
820 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
821 /* And the end of this block in the DCP */
822 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
824 /* Remove anything that comes before the start or after the end of the content */
825 if (time < piece->content->position()) {
826 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
828 /* This audio is entirely discarded */
831 content_audio.audio = cut.first;
833 } else if (time > piece->content->end()) {
836 } else if (end > piece->content->end()) {
/* Truncate the block so it finishes exactly at the content's end */
837 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
838 if (remaining_frames == 0) {
841 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
842 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
843 content_audio.audio = cut;
846 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain, on a copy so the decoder's buffer is untouched */
850 if (content->gain() != 0) {
851 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
852 gain->apply_gain (content->gain ());
853 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout */
858 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
862 if (_audio_processor) {
863 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
868 _audio_merger.push (content_audio.audio, time);
869 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
870 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for the start of a bitmap (image) subtitle from a text decoder.
   Applies the content's offset/scale settings to the subtitle rectangle and
   registers it as active from the corresponding DCP time. */
874 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
876 shared_ptr<Piece> piece = wp.lock ();
877 shared_ptr<const TextContent> text = wc.lock ();
878 if (!piece || !text) {
882 /* Apply content's subtitle offsets */
883 subtitle.sub.rectangle.x += text->x_offset ();
884 subtitle.sub.rectangle.y += text->y_offset ();
886 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
887 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
888 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
890 /* Apply content's subtitle scale */
891 subtitle.sub.rectangle.width *= text->x_scale ();
892 subtitle.sub.rectangle.height *= text->y_scale ();
895 ps.image.push_back (subtitle.sub);
896 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
898 _active_texts[subtitle.type()].add_from (wc, ps, from);
/* Handler for the start of a plain (string) subtitle from a text decoder.
   Applies position offsets, expresses the content's x/y scale via the
   subtitle's size and aspect-adjust, and registers it as active. */
902 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
904 shared_ptr<Piece> piece = wp.lock ();
905 shared_ptr<const TextContent> text = wc.lock ();
906 if (!piece || !text) {
911 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore subtitles that start after the content has finished */
913 if (from > piece->content->end()) {
917 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
918 s.set_h_position (s.h_position() + text->x_offset ());
919 s.set_v_position (s.v_position() + text->y_offset ());
920 float const xs = text->x_scale();
921 float const ys = text->y_scale();
922 float size = s.size();
924 /* Adjust size to express the common part of the scaling;
925 e.g. if xs = ys = 0.5 we scale size by 2.
927 if (xs > 1e-5 && ys > 1e-5) {
928 size *= 1 / min (1 / xs, 1 / ys);
932 /* Then express aspect ratio changes */
933 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
934 s.set_aspect_adjust (xs / ys);
/* dcp::Time with 1000 ticks-per-second editable rate */
937 s.set_in (dcp::Time(from.seconds(), 1000));
938 ps.text.push_back (StringText (s, text->outline_width()));
939 ps.add_fonts (text->fonts ());
942 _active_texts[subtitle.type()].add_from (wc, ps, from);
/* Handler for the end of a subtitle.  Closes the active text and, when the
   subtitle is in use but not burnt in, emits it via the Text signal. */
946 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to, TextType type)
948 if (!_active_texts[type].have (wc)) {
952 shared_ptr<Piece> piece = wp.lock ();
953 shared_ptr<const TextContent> text = wc.lock ();
954 if (!piece || !text) {
958 DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ignore stops that fall after the content has finished */
960 if (dcp_to > piece->content->end()) {
964 pair<PlayerText, DCPTime> from = _active_texts[type].add_to (wc, dcp_to);
/* Burnt-in texts are composited onto the video instead of emitted here */
966 bool const always = (type == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
967 if (text->use() && !always && !text->burn()) {
968 Text (from.first, type, DCPTimePeriod (from.second, dcp_to));
/* Seek the player to the given DCP time.
   @param accurate true to get the exact frame at `time'; false is faster but
   may start slightly before it.  Flushes/clears audio state, seeks every
   piece's decoder, and resets the last-emitted-time bookkeeping (exact times
   when accurate, unset otherwise). */
973 Player::seek (DCPTime time, bool accurate)
975 if (!_have_valid_pieces) {
985 if (_audio_processor) {
986 _audio_processor->flush ();
989 _audio_merger.clear ();
990 for (int i = 0; i < TEXT_COUNT; ++i) {
991 _active_texts[i].clear ();
994 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
995 if (time < i->content->position()) {
996 /* Before; seek to the start of the content */
997 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
999 } else if (i->content->position() <= time && time < i->content->end()) {
1000 /* During; seek to position */
1001 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1004 /* After; this piece is done */
/* Accurate seek: we know exactly where the next emission should land */
1010 _last_video_time = time;
1011 _last_video_eyes = EYES_LEFT;
1012 _last_audio_time = time;
1014 _last_video_time = optional<DCPTime>();
1015 _last_video_eyes = optional<Eyes>();
1016 _last_audio_time = optional<DCPTime>();
1019 _black.set_position (time);
1020 _silent.set_position (time);
1022 _last_video.clear ();
/* Queue a video frame for emission, updating the last-video bookkeeping.
   Frames are buffered briefly in _delay (see comment) before being passed
   to do_emit_video once the queue holds 3 entries. */
1026 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1028 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1029 player before the video that requires them.
1031 _delay.push_back (make_pair (pv, time));
/* Time advances once per full frame (i.e. after the right eye in 3D) */
1033 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1034 _last_video_time = time + one_video_frame();
1036 _last_video_eyes = increment_eyes (pv->eyes());
1038 if (_delay.size() < 3) {
1042 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1044 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: expire texts that have finished, attach any
   open subtitles that must appear on this frame, then signal it out. */
1048 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1050 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1051 for (int i = 0; i < TEXT_COUNT; ++i) {
1052 _active_texts[i].clear_before (time);
1056 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1058 pv->set_text (subtitles.get ());
/* Emit an audio block and advance _last_audio_time.  Audio must be emitted
   strictly contiguously — see the assert. */
1065 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1067 /* Log if the assert below is about to fail */
1068 if (_last_audio_time && time != *_last_audio_time) {
1069 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1072 /* This audio must follow on from the previous */
1073 DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1075 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence covering the given period, in blocks of at most 0.5 seconds.
   A zero-length period is a no-op. */
1079 Player::fill_audio (DCPTimePeriod period)
1081 if (period.from == period.to) {
1085 DCPOMATIC_ASSERT (period.from < period.to);
1087 DCPTime t = period.from;
1088 while (t < period.to) {
1089 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1090 Frame const samples = block.frames_round(_film->audio_frame_rate());
/* NOTE(review): elided listing — presumably a `samples > 0' guard sits here */
1092 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1093 silence->make_silent ();
1094 emit_audio (silence, t);
/* @return the duration of one video frame at the film's frame rate. */
1101 Player::one_video_frame () const
1103 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Drop the part of `audio' (positioned at `time') that falls before
   `discard_to'.  @return the remaining buffer and its new start time, or a
   null buffer if everything was discarded. */
1106 pair<shared_ptr<AudioBuffers>, DCPTime>
1107 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1109 DCPTime const discard_time = discard_to - time;
1110 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1111 Frame remaining_frames = audio->frames() - discard_frames;
1112 if (remaining_frames <= 0) {
1113 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1115 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1116 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1117 return make_pair(cut, time + discard_time);
/* Set a resolution reduction for DCP decoding (e.g. decode at half size).
   No-op if unchanged; otherwise pieces are invalidated and Changed emitted. */
1121 Player::set_dcp_decode_reduction (optional<int> reduction)
1123 if (reduction == _dcp_decode_reduction) {
1127 _dcp_decode_reduction = reduction;
1128 _have_valid_pieces = false;
1129 Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Overload: convert a ContentTime to DCP time for a given Content by finding
   its Piece.  Asserts if the content is not in the playlist. */
1133 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
/* NOTE(review): elided listing — an `if (!_have_valid_pieces)' rebuild
   presumably precedes this; confirm against the full source. */
1135 if (_have_valid_pieces) {
1139 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1140 if (i->content == content) {
1141 return content_time_to_dcp (i, t);
/* Unreachable for content that belongs to the playlist */
1145 DCPOMATIC_ASSERT (false);