2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
86 int const PlayerProperty::IGNORE = 705;
87 int const PlayerProperty::FAST = 706;
88 int const PlayerProperty::PLAY_REFERENCED = 707;
90 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
92 , _playlist (playlist)
93 , _have_valid_pieces (false)
94 , _ignore_video (false)
95 , _ignore_audio (false)
96 , _ignore_text (false)
97 , _always_burn_open_subtitles (false)
99 , _play_referenced (false)
100 , _audio_merger (_film->audio_frame_rate())
103 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
104 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
105 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
106 set_video_container_size (_film->frame_size ());
108 film_changed (Film::AUDIO_PROCESSOR);
110 seek (DCPTime (), true);
/* Rebuild the _pieces list (content + decoder + frame-rate-change triples) from the
   playlist, wire up decoder emission signals, and reset playback state.
   NOTE(review): this text was recovered with stray leading line numbers and
   dropped lines (braces, some statements); preserved as-is below.
*/
119 Player::setup_pieces ()
/* A Shuffler re-orders 3D left/right frames that arrive out of sequence */
124 _shuffler = new Shuffler();
125 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
127 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files are missing */
129 if (!i->paths_valid ()) {
133 if (_ignore_video && _ignore_audio && i->text.empty()) {
134 /* We're only interested in text and this content has none */
138 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
139 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
142 /* Not something that we can decode; e.g. Atmos content */
/* Honour the ignore flags by silencing the corresponding decoder parts */
146 if (decoder->video && _ignore_video) {
147 decoder->video->set_ignore (true);
150 if (decoder->audio && _ignore_audio) {
151 decoder->audio->set_ignore (true);
155 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
156 i->set_ignore (true);
/* DCP content may be played "referenced" (passed through rather than re-encoded) */
160 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
162 dcp->set_decode_referenced (_play_referenced);
163 if (_play_referenced) {
164 dcp->set_forced_reduction (_dcp_decode_reduction);
168 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
169 _pieces.push_back (piece);
171 if (decoder->video) {
172 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
173 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
174 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
176 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
180 if (decoder->audio) {
181 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Connect each text decoder's start/stop signals back into this Player */
184 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
186 while (j != decoder->text.end()) {
187 (*j)->BitmapStart.connect (
188 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
190 (*j)->PlainStart.connect (
191 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
194 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1, _2)
/* Record, per audio stream, the piece it belongs to and its start position */
201 _stream_states.clear ();
202 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
203 if (i->content->audio) {
204 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
205 _stream_states[j] = StreamState (i, i->content->position ());
/* _black/_silent track the gaps in the timeline that need black video / silence */
210 _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
211 _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
/* Reset emission bookkeeping to the start of the DCP */
213 _last_video_time = DCPTime ();
214 _last_video_eyes = EYES_BOTH;
215 _last_audio_time = DCPTime ();
216 _have_valid_pieces = true;
/* Handler for the playlist's ContentChanged signal.  Properties in the first
   group change what the pieces/decoders must look like, so the pieces are
   invalidated; properties in the second group only alter rendering, so just
   re-emit Changed.  (Recovered text: stray line numbers / dropped lines kept as-is.)
*/
220 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
222 shared_ptr<Content> c = w.lock ();
/* Properties that require the pieces to be rebuilt */
228 property == ContentProperty::POSITION ||
229 property == ContentProperty::LENGTH ||
230 property == ContentProperty::TRIM_START ||
231 property == ContentProperty::TRIM_END ||
232 property == ContentProperty::PATH ||
233 property == VideoContentProperty::FRAME_TYPE ||
234 property == VideoContentProperty::COLOUR_CONVERSION ||
235 property == AudioContentProperty::STREAMS ||
236 property == DCPContentProperty::NEEDS_ASSETS ||
237 property == DCPContentProperty::NEEDS_KDM ||
238 property == TextContentProperty::COLOUR ||
239 property == TextContentProperty::EFFECT ||
240 property == TextContentProperty::EFFECT_COLOUR ||
241 property == FFmpegContentProperty::SUBTITLE_STREAM ||
242 property == FFmpegContentProperty::FILTERS
/* Invalidate under the lock, then signal outside it */
246 boost::mutex::scoped_lock lm (_mutex);
247 _have_valid_pieces = false;
250 Changed (property, frequent);
/* Properties that only affect rendering of existing pieces */
253 property == TextContentProperty::LINE_SPACING ||
254 property == TextContentProperty::OUTLINE_WIDTH ||
255 property == TextContentProperty::Y_SCALE ||
256 property == TextContentProperty::FADE_IN ||
257 property == TextContentProperty::FADE_OUT ||
258 property == ContentProperty::VIDEO_FRAME_RATE ||
259 property == TextContentProperty::USE ||
260 property == TextContentProperty::X_OFFSET ||
261 property == TextContentProperty::Y_OFFSET ||
262 property == TextContentProperty::X_SCALE ||
263 property == TextContentProperty::FONTS ||
264 property == TextContentProperty::TYPE ||
265 property == VideoContentProperty::CROP ||
266 property == VideoContentProperty::SCALE ||
267 property == VideoContentProperty::FADE_IN ||
268 property == VideoContentProperty::FADE_OUT
271 Changed (property, frequent);
276 Player::set_video_container_size (dcp::Size s)
279 boost::mutex::scoped_lock lm (_mutex);
281 if (s == _video_container_size) {
285 _video_container_size = s;
287 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
288 _black_image->make_black ();
291 Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
295 Player::playlist_changed ()
298 boost::mutex::scoped_lock lm (_mutex);
299 _have_valid_pieces = false;
302 Changed (PlayerProperty::PLAYLIST, false);
306 Player::film_changed (Film::Property p)
308 /* Here we should notice Film properties that affect our output, and
309 alert listeners that our output now would be different to how it was
310 last time we were run.
/* Container change only affects scaling; no piece rebuild needed */
313 if (p == Film::CONTAINER) {
314 Changed (PlayerProperty::FILM_CONTAINER, false);
315 } else if (p == Film::VIDEO_FRAME_RATE) {
316 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
317 so we need new pieces here.
320 boost::mutex::scoped_lock lm (_mutex);
321 _have_valid_pieces = false;
323 Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
324 } else if (p == Film::AUDIO_PROCESSOR) {
/* Clone the film's processor at our output rate; also called from the constructor */
325 if (_film->audio_processor ()) {
326 boost::mutex::scoped_lock lm (_mutex);
327 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
329 } else if (p == Film::AUDIO_CHANNELS) {
/* Channel count changed: any queued audio has the wrong channel layout */
330 boost::mutex::scoped_lock lm (_mutex);
331 _audio_merger.clear ();
/* Convert a list of bitmap subtitles (rectangles expressed as proportions of the
   image) into PositionImages scaled and positioned for _video_container_size.
   NOTE(review): several lines of this body (scaling call, result accumulation,
   return) are missing from this recovered text.
*/
336 Player::transform_bitmap_texts (list<BitmapText> subs) const
338 list<PositionImage> all;
340 for (list<BitmapText>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
345 /* We will scale the subtitle up to fit _video_container_size */
/* rectangle values are fractions of the container, hence the multiplies */
346 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
352 dcp::YUV_TO_RGB_REC601,
353 i->image->pixel_format (),
/* Position is also proportional; convert to pixels with rounding */
358 lrint (_video_container_size.width * i->rectangle.x),
359 lrint (_video_container_size.height * i->rectangle.y)
/** @return A PlayerVideo wrapping the cached black image, for the given eyes,
 *  used to fill gaps in the timeline.
 *  NOTE(review): some constructor arguments are missing from this recovered text.
 */
368 shared_ptr<PlayerVideo>
369 Player::black_player_video_frame (Eyes eyes) const
371 return shared_ptr<PlayerVideo> (
373 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
376 _video_container_size,
377 _video_container_size,
/* An arbitrary but valid colour conversion; the frame is all black anyway */
380 PresetColourConversion::all().front().conversion,
/* No originating content / frame index for synthesized black */
381 boost::weak_ptr<Content>(),
382 boost::optional<Frame>()
388 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
390 DCPTime s = t - piece->content->position ();
391 s = min (piece->content->length_after_trim(), s);
392 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
394 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
395 then convert that ContentTime to frames at the content's rate. However this fails for
396 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
397 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
399 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
401 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
405 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
407 /* See comment in dcp_to_content_video */
408 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
409 return d + piece->content->position();
413 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
415 DCPTime s = t - piece->content->position ();
416 s = min (piece->content->length_after_trim(), s);
417 /* See notes in dcp_to_content_video */
418 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
422 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
424 /* See comment in dcp_to_content_video */
425 return DCPTime::from_frames (f, _film->audio_frame_rate())
426 - DCPTime (piece->content->trim_start(), piece->frc)
427 + piece->content->position();
431 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
433 DCPTime s = t - piece->content->position ();
434 s = min (piece->content->length_after_trim(), s);
435 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
439 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
441 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by all text content in the playlist, for embedding in
   the DCP.  (Recovered text: the setup_pieces() call and closing braces/return
   are missing from this view.)
*/
444 list<shared_ptr<Font> >
445 Player::get_subtitle_fonts ()
447 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
449 if (!_have_valid_pieces) {
453 list<shared_ptr<Font> > fonts;
454 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
455 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
456 /* XXX: things may go wrong if there are duplicate font IDs
457 with different font files.
459 list<shared_ptr<Font> > f = j->fonts ();
460 copy (f.begin(), f.end(), back_inserter (fonts));
467 /** Set this player never to produce any video data */
469 Player::set_ignore_video ()
472 boost::mutex::scoped_lock lm (_mutex);
473 _ignore_video = true;
474 _have_valid_pieces = false;
477 Changed (PlayerProperty::IGNORE, false);
481 Player::set_ignore_audio ()
484 boost::mutex::scoped_lock lm (_mutex);
485 _ignore_audio = true;
486 _have_valid_pieces = false;
489 Changed (PlayerProperty::IGNORE, false);
493 Player::set_ignore_text ()
496 boost::mutex::scoped_lock lm (_mutex);
498 _have_valid_pieces = false;
501 Changed (PlayerProperty::IGNORE, false);
504 /** Set the player to always burn open texts into the image regardless of the content settings */
506 Player::set_always_burn_open_subtitles ()
508 boost::mutex::scoped_lock lm (_mutex);
509 _always_burn_open_subtitles = true;
512 /** Sets up the player to be faster, possibly at the expense of quality */
517 boost::mutex::scoped_lock lm (_mutex);
519 _have_valid_pieces = false;
522 Changed (PlayerProperty::FAST, false);
526 Player::set_play_referenced ()
529 boost::mutex::scoped_lock lm (_mutex);
530 _play_referenced = true;
531 _have_valid_pieces = false;
534 Changed (PlayerProperty::PLAY_REFERENCED, false);
/* Gather the reel assets (picture/sound/subtitle/caption) from any DCP content
   that is set to be referenced rather than re-encoded, trimming each asset's
   entry point and duration to match the content's trims.
   (Recovered text: stray line numbers / dropped lines kept as-is.)
*/
537 list<ReferencedReelAsset>
538 Player::get_reel_assets ()
540 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
542 list<ReferencedReelAsset> a;
544 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can be referenced */
545 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
550 scoped_ptr<DCPDecoder> decoder;
552 decoder.reset (new DCPDecoder (j, _film->log(), false));
558 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
/* Express the content's trims in frames at the content (DCP) rate */
560 DCPOMATIC_ASSERT (j->video_frame_rate ());
561 double const cfr = j->video_frame_rate().get();
562 Frame const trim_start = j->trim_start().frames_round (cfr);
563 Frame const trim_end = j->trim_end().frames_round (cfr);
564 int const ffr = _film->video_frame_rate ();
/* `offset' accumulates reel lengths so each reel is placed after the previous one */
566 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
567 if (j->reference_video ()) {
568 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
569 DCPOMATIC_ASSERT (ra);
/* Move the asset's entry point / duration to reflect the trims */
570 ra->set_entry_point (ra->entry_point() + trim_start);
571 ra->set_duration (ra->duration() - trim_start - trim_end);
573 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
577 if (j->reference_audio ()) {
578 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
579 DCPOMATIC_ASSERT (ra);
580 ra->set_entry_point (ra->entry_point() + trim_start);
581 ra->set_duration (ra->duration() - trim_start - trim_end);
583 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
587 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
588 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
589 DCPOMATIC_ASSERT (ra);
590 ra->set_entry_point (ra->entry_point() + trim_start);
591 ra->set_duration (ra->duration() - trim_start - trim_end);
593 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
597 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
598 shared_ptr<dcp::ReelAsset> ra = k->closed_caption ();
599 DCPOMATIC_ASSERT (ra);
600 ra->set_entry_point (ra->entry_point() + trim_start);
601 ra->set_duration (ra->duration() - trim_start - trim_end);
603 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
607 /* Assume that main picture duration is the length of the reel */
608 offset += k->main_picture()->duration ();
/* This is the body of Player::pass(): advance playback by making the decoder
   (or black/silence filler) that is furthest behind emit some data, then flush
   any audio that is known to be complete.  Returns true when everything is done.
   NOTE(review): the function signature and several branch lines are missing
   from this recovered text; stray line numbers kept as-is.
*/
618 boost::mutex::scoped_lock lm (_mutex);
620 if (!_have_valid_pieces) {
621 /* This should only happen when we are under the control of the butler. In this case, _have_valid_pieces
622 will be false if something in the Player has changed and we are waiting for the butler to notice
623 and do a seek back to the place we were at before. During this time we don't want pass() to do anything,
624 as just after setup_pieces the new decoders will be back to time 0 until the seek has gone through. Just do nothing
625 here and assume that the seek will be forthcoming.
630 if (_playlist->length() == DCPTime()) {
631 /* Special case of an empty Film; just give one black frame */
632 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
636 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
638 shared_ptr<Piece> earliest_content;
639 optional<DCPTime> earliest_time;
641 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Each piece's candidate time is its decoder position mapped onto the DCP timeline */
646 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
647 if (t > i->content->end()) {
651 /* Given two choices at the same time, pick the one with texts so we see it before
654 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
656 earliest_content = i;
670 if (earliest_content) {
/* Black/silence fillers compete with real content for "earliest" */
674 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
675 earliest_time = _black.position ();
679 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
680 earliest_time = _silent.position ();
/* CONTENT case: let the winning decoder emit */
686 earliest_content->done = earliest_content->decoder->pass ();
/* BLACK case: emit one black frame and advance */
689 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
690 _black.set_position (_black.position() + one_video_frame());
/* SILENT case: fill up to one video frame's worth of silence */
694 DCPTimePeriod period (_silent.period_at_position());
695 if (_last_audio_time) {
696 /* Sometimes the thing that happened last finishes fractionally before
697 this silence. Bodge the start time of the silence to fix it. I'm
698 not sure if this is the right solution --- maybe the last thing should
699 be padded `forward' rather than this thing padding `back'.
701 period.from = min(period.from, *_last_audio_time);
703 if (period.duration() > one_video_frame()) {
704 period.to = period.from + one_video_frame();
707 _silent.set_position (period.to);
715 /* Emit any audio that is ready */
717 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
718 of our streams, or the position of the _silent.
720 DCPTime pull_to = _film->length ();
721 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
722 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
723 pull_to = i->second.last_push_end;
726 if (!_silent.done() && _silent.position() < pull_to) {
727 pull_to = _silent.position();
730 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
731 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
732 if (_last_audio_time && i->second < *_last_audio_time) {
733 /* This new data comes before the last we emitted (or the last seek); discard it */
734 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
739 } else if (_last_audio_time && i->second > *_last_audio_time) {
740 /* There's a gap between this data and the last we emitted; fill with silence */
741 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
744 emit_audio (i->first, i->second);
/* When everything is done, flush the video delay queue */
749 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
750 do_emit_video(i->first, i->second);
757 /** @return Open subtitles for the frame at the given time, converted to images */
758 optional<PositionImage>
759 Player::open_subtitles_for_frame (DCPTime time) const
761 list<PositionImage> captions;
762 int const vfr = _film->video_frame_rate();
/* Ask the active-texts store for everything burnt-in during this one-frame period */
766 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
769 /* Bitmap subtitles */
770 list<PositionImage> c = transform_bitmap_texts (j.bitmap);
771 copy (c.begin(), c.end(), back_inserter (captions));
773 /* String subtitles (rendered to an image) */
774 if (!j.string.empty ()) {
775 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
776 copy (s.begin(), s.end(), back_inserter (captions));
/* No subtitles at this time */
780 if (captions.empty ()) {
781 return optional<PositionImage> ();
/* Flatten all the images into one */
784 return merge (captions);
/* Handler for video data emitted by a piece's decoder: convert the content frame
   to DCP time, fill any gap since the last emitted video (repeating the previous
   frame or using black, handling 3D eye interleaving), then emit the new frame
   (repeated as required by the frame-rate change).
   (Recovered text: stray line numbers / dropped lines kept as-is.)
*/
788 Player::video (weak_ptr<Piece> wp, ContentVideo video)
790 shared_ptr<Piece> piece = wp.lock ();
795 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
/* When skipping (content faster than DCP) drop every other frame */
796 if (frc.skip && (video.frame % 2) == 1) {
800 /* Time of the first frame we will emit */
801 DCPTime const time = content_video_to_dcp (piece, video.frame);
803 /* Discard if it's before the content's period or the last accurate seek. We can't discard
804 if it's after the content's period here as in that case we still need to fill any gap between
805 `now' and the end of the content's period.
807 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
811 /* Fill gaps that we discover now that we have some video which needs to be emitted.
812 This is where we need to fill to.
814 DCPTime fill_to = min (time, piece->content->end());
816 if (_last_video_time) {
817 DCPTime fill_from = max (*_last_video_time, piece->content->position());
/* Prefer repeating the last frame we saw for this piece over plain black */
818 LastVideoMap::const_iterator last = _last_video.find (wp);
819 if (_film->three_d()) {
/* 3D: fill eye-by-eye so L/R stay in lockstep */
820 Eyes fill_to_eyes = video.eyes;
821 if (fill_to == piece->content->end()) {
822 /* Don't fill after the end of the content */
823 fill_to_eyes = EYES_LEFT;
825 DCPTime j = fill_from;
826 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
827 if (eyes == EYES_BOTH) {
830 while (j < fill_to || eyes != fill_to_eyes) {
831 if (last != _last_video.end()) {
832 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
833 copy->set_eyes (eyes);
834 emit_video (copy, j);
836 emit_video (black_player_video_frame(eyes), j);
/* Only advance time after emitting the right eye */
838 if (eyes == EYES_RIGHT) {
839 j += one_video_frame();
841 eyes = increment_eyes (eyes);
/* 2D: one fill frame per video frame period */
844 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
845 if (last != _last_video.end()) {
846 emit_video (last->second, j);
848 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this frame and remember it for future gap-filling */
854 _last_video[wp].reset (
857 piece->content->video->crop (),
858 piece->content->video->fade (video.frame),
859 piece->content->video->scale().size (
860 piece->content->video, _video_container_size, _film->frame_size ()
862 _video_container_size,
865 piece->content->video->colour_conversion(),
/* Emit the frame, repeated if the frame-rate change requires it */
872 for (int i = 0; i < frc.repeat; ++i) {
873 if (t < piece->content->end()) {
874 emit_video (_last_video[wp], t);
876 t += one_video_frame ();
/* Handler for audio data emitted by a piece's decoder: position the block on the
   DCP timeline, trim anything outside the content's period, apply gain, remap
   channels, run the audio processor, and push into the merger.
   (Recovered text: stray line numbers / dropped lines kept as-is.)
*/
881 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
883 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
885 shared_ptr<Piece> piece = wp.lock ();
890 shared_ptr<AudioContent> content = piece->content->audio;
891 DCPOMATIC_ASSERT (content);
893 /* Compute time in the DCP */
894 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
895 /* And the end of this block in the DCP */
896 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
898 /* Remove anything that comes before the start or after the end of the content */
899 if (time < piece->content->position()) {
900 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
902 /* This audio is entirely discarded */
905 content_audio.audio = cut.first;
907 } else if (time > piece->content->end()) {
/* Whole block is after the content's end: drop it */
910 } else if (end > piece->content->end()) {
/* Block straddles the content's end: keep only the part inside */
911 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
912 if (remaining_frames == 0) {
915 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
916 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
917 content_audio.audio = cut;
920 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Gain */
924 if (content->gain() != 0) {
925 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
926 gain->apply_gain (content->gain ());
927 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout */
932 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
/* Process (e.g. upmix) if the film has an audio processor */
936 if (_audio_processor) {
937 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push into the merger and record how far this stream has got */
942 _audio_merger.push (content_audio.audio, time);
943 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
944 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for a bitmap subtitle starting: apply the content's offset/scale
   settings to the subtitle rectangle and register it as active.
   (Recovered text: stray line numbers / dropped lines kept as-is.)
*/
948 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
950 shared_ptr<Piece> piece = wp.lock ();
951 shared_ptr<const TextContent> text = wc.lock ();
952 if (!piece || !text) {
956 /* Apply content's subtitle offsets */
957 subtitle.sub.rectangle.x += text->x_offset ();
958 subtitle.sub.rectangle.y += text->y_offset ();
960 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
961 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
962 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
964 /* Apply content's subtitle scale */
965 subtitle.sub.rectangle.width *= text->x_scale ();
966 subtitle.sub.rectangle.height *= text->y_scale ();
/* Wrap in a PlayerText and register as active from the converted DCP time */
969 ps.bitmap.push_back (subtitle.sub);
970 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
972 _active_texts[subtitle.type()].add_from (wc, ps, from);
/* Handler for a string subtitle starting: apply the content's offsets and scales
   to each SubtitleString and register the result as active.
   (Recovered text: stray line numbers / dropped lines kept as-is.)
*/
976 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
978 shared_ptr<Piece> piece = wp.lock ();
979 shared_ptr<const TextContent> text = wc.lock ();
980 if (!piece || !text) {
985 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Drop subtitles that start after the content has ended */
987 if (from > piece->content->end()) {
991 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
992 s.set_h_position (s.h_position() + text->x_offset ());
993 s.set_v_position (s.v_position() + text->y_offset ());
994 float const xs = text->x_scale();
995 float const ys = text->y_scale();
996 float size = s.size();
998 /* Adjust size to express the common part of the scaling;
999 e.g. if xs = ys = 0.5 we scale size by 2.
1001 if (xs > 1e-5 && ys > 1e-5) {
1002 size *= 1 / min (1 / xs, 1 / ys);
1006 /* Then express aspect ratio changes */
1007 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1008 s.set_aspect_adjust (xs / ys);
/* in-time with millisecond resolution */
1011 s.set_in (dcp::Time(from.seconds(), 1000));
1012 ps.string.push_back (StringText (s, text->outline_width()));
1013 ps.add_fonts (text->fonts ());
1016 _active_texts[subtitle.type()].add_from (wc, ps, from);
/* Handler for a subtitle ending: close the active text for this content and, if
   it is not being burnt into the image, emit it on the Text signal.
   (Recovered text: stray line numbers / dropped lines kept as-is.)
*/
1020 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to, TextType type)
1022 if (!_active_texts[type].have (wc)) {
1026 shared_ptr<Piece> piece = wp.lock ();
1027 shared_ptr<const TextContent> text = wc.lock ();
1028 if (!piece || !text) {
1032 DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ignore stops after the content has ended */
1034 if (dcp_to > piece->content->end()) {
1038 pair<PlayerText, DCPTime> from = _active_texts[type].add_to (wc, dcp_to);
/* If the text is burnt in it will be composited into video instead of emitted here */
1040 bool const always = (type == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1041 if (text->use() && !always && !text->burn()) {
1042 Text (from.first, type, DCPTimePeriod (from.second, dcp_to));
/* Seek to @p time.  If @p accurate is true, the next emitted data will be exactly
   at @p time; otherwise it may be shortly before.  Flushes all queued state and
   seeks each piece's decoder.
   (Recovered text: stray line numbers / dropped lines kept as-is.)
*/
1047 Player::seek (DCPTime time, bool accurate)
1049 boost::mutex::scoped_lock lm (_mutex);
/* Rebuild pieces first if they have been invalidated */
1051 if (!_have_valid_pieces) {
1056 _shuffler->clear ();
1061 if (_audio_processor) {
1062 _audio_processor->flush ();
1065 _audio_merger.clear ();
1066 for (int i = 0; i < TEXT_COUNT; ++i) {
1067 _active_texts[i].clear ();
1070 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1071 if (time < i->content->position()) {
1072 /* Before; seek to the start of the content */
1073 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
1075 } else if (i->content->position() <= time && time < i->content->end()) {
1076 /* During; seek to position */
1077 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1080 /* After; this piece is done */
/* Accurate seeks pin the next emission times; inaccurate ones leave them open */
1086 _last_video_time = time;
1087 _last_video_eyes = EYES_LEFT;
1088 _last_audio_time = time;
1090 _last_video_time = optional<DCPTime>();
1091 _last_video_eyes = optional<Eyes>();
1092 _last_audio_time = optional<DCPTime>();
1095 _black.set_position (time);
1096 _silent.set_position (time);
1098 _last_video.clear ();
1102 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1104 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1105 player before the video that requires them.
1107 _delay.push_back (make_pair (pv, time));
1109 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1110 _last_video_time = time + one_video_frame();
1112 _last_video_eyes = increment_eyes (pv->eyes());
1114 if (_delay.size() < 3) {
1118 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1120 do_emit_video (to_do.first, to_do.second);
1124 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1126 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1127 for (int i = 0; i < TEXT_COUNT; ++i) {
1128 _active_texts[i].clear_before (time);
1132 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1134 pv->set_text (subtitles.get ());
1141 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1143 /* Log if the assert below is about to fail */
1144 if (_last_audio_time && time != *_last_audio_time) {
1145 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1148 /* This audio must follow on from the previous */
1149 DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1151 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1155 Player::fill_audio (DCPTimePeriod period)
1157 if (period.from == period.to) {
1161 DCPOMATIC_ASSERT (period.from < period.to);
1163 DCPTime t = period.from;
1164 while (t < period.to) {
1165 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1166 Frame const samples = block.frames_round(_film->audio_frame_rate());
1168 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1169 silence->make_silent ();
1170 emit_audio (silence, t);
1177 Player::one_video_frame () const
1179 return DCPTime::from_frames (1, _film->video_frame_rate ());
1182 pair<shared_ptr<AudioBuffers>, DCPTime>
1183 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1185 DCPTime const discard_time = discard_to - time;
1186 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1187 Frame remaining_frames = audio->frames() - discard_frames;
1188 if (remaining_frames <= 0) {
1189 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1191 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1192 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1193 return make_pair(cut, time + discard_time);
1197 Player::set_dcp_decode_reduction (optional<int> reduction)
1200 boost::mutex::scoped_lock lm (_mutex);
1202 if (reduction == _dcp_decode_reduction) {
1206 _dcp_decode_reduction = reduction;
1207 _have_valid_pieces = false;
1210 Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Public overload: convert a time within @p content to a DCP timeline time by
   finding the matching piece.  Asserts if @p content is not in the playlist.
   NOTE(review): the visible condition `if (_have_valid_pieces)` guards what is
   almost certainly a setup_pieces() call on a dropped line; that would be
   inverted (setup should run when pieces are NOT valid) — confirm against the
   upstream source before relying on this.
   (Recovered text: stray line numbers / dropped lines kept as-is.)
*/
1214 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1216 boost::mutex::scoped_lock lm (_mutex);
1218 if (_have_valid_pieces) {
1222 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1223 if (i->content == content) {
1224 return content_time_to_dcp (i, t);
/* Content not found in the playlist: programming error */
1228 DCPOMATIC_ASSERT (false);