2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
/* Property identifiers passed with the Change signal so that observers can
   tell which aspect of the player's output has changed. */
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Construct a Player for the given film and playlist.
   NOTE(review): this extract is missing lines (e.g. the `_film` initialiser
   and braces); comments below annotate only the visible code. */
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 , _playlist (playlist)
91 , _ignore_video (false)
92 , _ignore_audio (false)
93 , _ignore_text (false)
94 , _always_burn_open_subtitles (false)
96 , _play_referenced (false)
97 , _audio_merger (_film->audio_frame_rate())
/* Watch the film and playlist for changes so our output can follow them */
100 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
101 _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1));
102 _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
103 set_video_container_size (_film->frame_size ());
/* Pick up the film's audio processor (if any) via the normal change path */
105 film_changed (Film::AUDIO_PROCESSOR);
/* Accurate seek to the start so that the decoders are ready to play */
108 seek (DCPTime (), true);
117 Player::setup_pieces ()
119 boost::mutex::scoped_lock lm (_mutex);
120 setup_pieces_unlocked ();
/* Build a Piece (content + decoder + frame-rate-change) for each usable bit
   of playlist content and wire decoder signals through to this Player.
   Caller must hold _mutex.  NOTE(review): this extract is missing lines
   (braces, `continue`s, disconnects); comments annotate visible code only. */
124 Player::setup_pieces_unlocked ()
/* A Shuffler re-orders 3D left/right-eye frames that arrive out of sequence */
129 _shuffler = new Shuffler();
130 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
132 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files have gone missing */
134 if (!i->paths_valid ()) {
138 if (_ignore_video && _ignore_audio && i->text.empty()) {
139 /* We're only interested in text and this content has none */
143 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
144 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
147 /* Not something that we can decode; e.g. Atmos content */
/* Tell the decoder to drop whole streams we have been told to ignore */
151 if (decoder->video && _ignore_video) {
152 decoder->video->set_ignore (true);
155 if (decoder->audio && _ignore_audio) {
156 decoder->audio->set_ignore (true);
160 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
161 i->set_ignore (true);
/* DCP content may have assets that are referenced rather than re-encoded */
165 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
167 dcp->set_decode_referenced (_play_referenced);
168 if (_play_referenced) {
169 dcp->set_forced_reduction (_dcp_decode_reduction);
173 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
174 _pieces.push_back (piece);
176 if (decoder->video) {
177 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
178 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
179 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
181 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
185 if (decoder->audio) {
186 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Hook up start/stop signals for every text (subtitle/caption) decoder */
189 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
191 while (j != decoder->text.end()) {
192 (*j)->BitmapStart.connect (
193 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
195 (*j)->PlainStart.connect (
196 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
199 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1, _2)
/* Track each audio stream's push position so pass() knows what is complete */
206 _stream_states.clear ();
207 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
208 if (i->content->audio) {
209 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
210 _stream_states[j] = StreamState (i, i->content->position ());
/* Periods with no video / no audio, to be filled with black / silence */
215 _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
216 _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
/* Reset emission bookkeeping to the start of the DCP */
218 _last_video_time = DCPTime ();
219 _last_video_eyes = EYES_BOTH;
220 _last_audio_time = DCPTime ();
/* Handle a change to some content in the playlist: suspend on PENDING,
   rebuild pieces on DONE, then pass the change on to our own observers.
   NOTE(review): the suspend/resume statements and the setup_pieces() call
   appear to be missing from this extract — confirm against the full file. */
225 Player::playlist_content_change (ChangeType type, int property, bool frequent)
227 if (type == CHANGE_TYPE_PENDING) {
228 boost::mutex::scoped_lock lm (_mutex);
229 /* The player content is probably about to change, so we can't carry on
230 until that has happened and we've rebuilt our pieces. Stop pass()
231 and seek() from working until then.
234 } else if (type == CHANGE_TYPE_DONE) {
235 /* A change in our content has gone through. Re-build our pieces. */
/* Re-emit so that users of this Player see the change too */
239 Change (type, property, frequent);
/* Set the size of the container that output video will be scaled into, and
   rebuild the cached black frame at that size.
   NOTE(review): the no-op early-return body and the PENDING/CANCELLED Change
   emissions appear to be missing from this extract — confirm. */
243 Player::set_video_container_size (dcp::Size s)
246 boost::mutex::scoped_lock lm (_mutex);
248 if (s == _video_container_size) {
252 _video_container_size = s;
/* Re-make the black frame used for gap filling at the new size */
254 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
255 _black_image->make_black ();
258 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
262 Player::playlist_change (ChangeType type)
264 if (type == CHANGE_TYPE_DONE) {
267 Change (type, PlayerProperty::PLAYLIST, false);
/* React to a change in a Film property that affects our output.
   NOTE(review): braces and some statements are missing from this extract. */
271 Player::film_changed (Film::Property p)
273 /* Here we should notice Film properties that affect our output, and
274 alert listeners that our output now would be different to how it was
275 last time we were run.
278 if (p == Film::CONTAINER) {
279 Change (CHANGE_TYPE_PENDING, PlayerProperty::FILM_CONTAINER, false);
280 } else if (p == Film::VIDEO_FRAME_RATE) {
281 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
282 so we need new pieces here.
284 /* XXX: missing PENDING! */
286 Change (CHANGE_TYPE_DONE, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
287 } else if (p == Film::AUDIO_PROCESSOR) {
288 if (_film->audio_processor ()) {
/* Clone the processor at our sample rate so we can run audio through it */
289 boost::mutex::scoped_lock lm (_mutex);
290 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
292 } else if (p == Film::AUDIO_CHANNELS) {
/* Channel count changed: buffered audio is no longer valid */
293 boost::mutex::scoped_lock lm (_mutex);
294 _audio_merger.clear ();
/* Convert bitmap subtitles (image + fractional rectangle) into PositionImages
   scaled and positioned for _video_container_size.
   NOTE(review): this extract is missing many lines (scaling call, list append,
   return); comments annotate only the visible fragments. */
299 Player::transform_bitmap_texts (list<BitmapText> subs) const
301 list<PositionImage> all;
303 for (list<BitmapText>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
308 /* We will scale the subtitle up to fit _video_container_size */
/* rectangle is expressed as fractions of the container, hence the multiply */
309 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
315 dcp::YUV_TO_RGB_REC601,
316 i->image->pixel_format (),
/* Position is likewise a fraction of the container, rounded to pixels */
321 lrint (_video_container_size.width * i->rectangle.x),
322 lrint (_video_container_size.height * i->rectangle.y)
/* Make a PlayerVideo wrapping the cached black image, for filling gaps.
   NOTE(review): several constructor arguments are missing from this extract. */
331 shared_ptr<PlayerVideo>
332 Player::black_player_video_frame (Eyes eyes) const
334 return shared_ptr<PlayerVideo> (
336 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
339 _video_container_size,
340 _video_container_size,
343 PresetColourConversion::all().front().conversion,
/* No originating content and no content frame number for synthetic black */
344 boost::weak_ptr<Content>(),
345 boost::optional<Frame>()
351 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
353 DCPTime s = t - piece->content->position ();
354 s = min (piece->content->length_after_trim(), s);
355 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
357 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
358 then convert that ContentTime to frames at the content's rate. However this fails for
359 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
360 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
362 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
364 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
368 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
370 /* See comment in dcp_to_content_video */
371 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
372 return d + piece->content->position();
376 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
378 DCPTime s = t - piece->content->position ();
379 s = min (piece->content->length_after_trim(), s);
380 /* See notes in dcp_to_content_video */
381 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
385 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
387 /* See comment in dcp_to_content_video */
388 return DCPTime::from_frames (f, _film->audio_frame_rate())
389 - DCPTime (piece->content->trim_start(), piece->frc)
390 + piece->content->position();
394 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
396 DCPTime s = t - piece->content->position ();
397 s = min (piece->content->length_after_trim(), s);
398 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
402 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
404 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
407 list<shared_ptr<Font> >
408 Player::get_subtitle_fonts ()
410 boost::mutex::scoped_lock lm (_mutex);
412 list<shared_ptr<Font> > fonts;
413 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
414 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
415 /* XXX: things may go wrong if there are duplicate font IDs
416 with different font files.
418 list<shared_ptr<Font> > f = j->fonts ();
419 copy (f.begin(), f.end(), back_inserter (fonts));
426 /** Set this player never to produce any video data */
428 Player::set_ignore_video ()
430 boost::mutex::scoped_lock lm (_mutex);
431 _ignore_video = true;
432 setup_pieces_unlocked ();
436 Player::set_ignore_audio ()
438 boost::mutex::scoped_lock lm (_mutex);
439 _ignore_audio = true;
440 setup_pieces_unlocked ();
444 Player::set_ignore_text ()
446 boost::mutex::scoped_lock lm (_mutex);
448 setup_pieces_unlocked ();
451 /** Set the player to always burn open texts into the image regardless of the content settings */
453 Player::set_always_burn_open_subtitles ()
455 boost::mutex::scoped_lock lm (_mutex);
456 _always_burn_open_subtitles = true;
459 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function signature and the flag assignment are missing
   from this extract; presumably this is Player::set_fast() setting _fast,
   which is passed to decoder_factory() in setup_pieces_unlocked() --
   confirm against the full file. */
463 boost::mutex::scoped_lock lm (_mutex);
465 setup_pieces_unlocked ();
469 Player::set_play_referenced ()
471 boost::mutex::scoped_lock lm (_mutex);
472 _play_referenced = true;
473 setup_pieces_unlocked ();
/* Gather the reel assets (picture/sound/subtitle/caption) from DCP content
   that is marked to be referenced rather than re-encoded, adjusting entry
   point and duration for the content's trims.
   NOTE(review): this extract is missing lines (e.g. the decoder try/catch,
   push_backs and `offset` declaration); comments annotate visible code only. */
476 list<ReferencedReelAsset>
477 Player::get_reel_assets ()
479 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
481 list<ReferencedReelAsset> a;
483 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can be referenced */
484 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
489 scoped_ptr<DCPDecoder> decoder;
491 decoder.reset (new DCPDecoder (j, _film->log(), false));
497 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
/* Trims expressed in frames at the content's own (DCP) rate */
499 DCPOMATIC_ASSERT (j->video_frame_rate ());
500 double const cfr = j->video_frame_rate().get();
501 Frame const trim_start = j->trim_start().frames_round (cfr);
502 Frame const trim_end = j->trim_end().frames_round (cfr);
503 int const ffr = _film->video_frame_rate ();
/* `offset' accumulates reel lengths so each reel is placed after the last */
505 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
506 if (j->reference_video ()) {
507 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
508 DCPOMATIC_ASSERT (ra);
/* Push entry point/duration in by the trims so only the used part is referenced */
509 ra->set_entry_point (ra->entry_point() + trim_start);
510 ra->set_duration (ra->duration() - trim_start - trim_end);
512 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
516 if (j->reference_audio ()) {
517 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
518 DCPOMATIC_ASSERT (ra);
519 ra->set_entry_point (ra->entry_point() + trim_start);
520 ra->set_duration (ra->duration() - trim_start - trim_end);
522 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
526 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
527 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
528 DCPOMATIC_ASSERT (ra);
529 ra->set_entry_point (ra->entry_point() + trim_start);
530 ra->set_duration (ra->duration() - trim_start - trim_end);
532 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
536 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
537 shared_ptr<dcp::ReelAsset> ra = k->closed_caption ();
538 DCPOMATIC_ASSERT (ra);
539 ra->set_entry_point (ra->entry_point() + trim_start);
540 ra->set_duration (ra->duration() - trim_start - trim_end);
542 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
546 /* Assume that main picture duration is the length of the reel */
547 offset += k->main_picture()->duration ();
/* Player::pass(): emit the next chunk of output (video, audio, black or
   silence), advancing whichever source is furthest behind.
   NOTE(review): the function signature and many lines (returns, the `switch'
   on which source to use, `done' computation) are missing from this extract;
   comments annotate visible code only. */
557 boost::mutex::scoped_lock lm (_mutex);
560 /* We can't pass in this state */
564 if (_playlist->length() == DCPTime()) {
565 /* Special case of an empty Film; just give one black frame */
566 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
570 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
572 shared_ptr<Piece> earliest_content;
573 optional<DCPTime> earliest_time;
575 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Where this piece's decoder currently is, expressed in DCP time */
580 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
581 if (t > i->content->end()) {
585 /* Given two choices at the same time, pick the one with texts so we see it before
588 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
590 earliest_content = i;
604 if (earliest_content) {
/* Black/silence periods may be behind all the content decoders */
608 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
609 earliest_time = _black.position ();
613 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
614 earliest_time = _silent.position ();
/* Ask the chosen decoder for more data */
620 earliest_content->done = earliest_content->decoder->pass ();
/* Emit one black frame and advance the black period */
623 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
624 _black.set_position (_black.position() + one_video_frame());
628 DCPTimePeriod period (_silent.period_at_position());
629 if (_last_audio_time) {
630 /* Sometimes the thing that happened last finishes fractionally before
631 this silence. Bodge the start time of the silence to fix it. I'm
632 not sure if this is the right solution --- maybe the last thing should
633 be padded `forward' rather than this thing padding `back'.
635 period.from = min(period.from, *_last_audio_time);
/* Emit at most one video frame's worth of silence per pass */
637 if (period.duration() > one_video_frame()) {
638 period.to = period.from + one_video_frame();
641 _silent.set_position (period.to);
649 /* Emit any audio that is ready */
651 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
652 of our streams, or the position of the _silent.
654 DCPTime pull_to = _film->length ();
655 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
656 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
657 pull_to = i->second.last_push_end;
660 if (!_silent.done() && _silent.position() < pull_to) {
661 pull_to = _silent.position();
664 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
665 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
666 if (_last_audio_time && i->second < *_last_audio_time) {
667 /* This new data comes before the last we emitted (or the last seek); discard it */
668 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
673 } else if (_last_audio_time && i->second > *_last_audio_time) {
674 /* There's a gap between this data and the last we emitted; fill with silence */
675 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
678 emit_audio (i->first, i->second);
/* Flush any delayed video when everything is done */
683 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
684 do_emit_video(i->first, i->second);
691 /** @return Open subtitles for the frame at the given time, converted to images */
/* NOTE(review): braces and the BOOST_FOREACH over active texts appear to be
   missing from this extract; comments annotate visible code only. */
692 optional<PositionImage>
693 Player::open_subtitles_for_frame (DCPTime time) const
695 list<PositionImage> captions;
696 int const vfr = _film->video_frame_rate();
/* All open subtitles active during this one-frame period */
700 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
703 /* Bitmap subtitles */
704 list<PositionImage> c = transform_bitmap_texts (j.bitmap);
705 copy (c.begin(), c.end(), back_inserter (captions));
707 /* String subtitles (rendered to an image) */
708 if (!j.string.empty ()) {
709 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
710 copy (s.begin(), s.end(), back_inserter (captions));
714 if (captions.empty ()) {
715 return optional<PositionImage> ();
/* Combine everything into a single image */
718 return merge (captions);
/* Receive a frame of video from a decoder: discard if out of range, fill any
   gap since the last emission (repeating the previous frame or using black),
   then wrap the frame in a PlayerVideo and emit it.
   NOTE(review): this extract is missing lines (returns, braces, the PlayerVideo
   constructor arguments, `t' declaration); comments annotate visible code only. */
722 Player::video (weak_ptr<Piece> wp, ContentVideo video)
724 shared_ptr<Piece> piece = wp.lock ();
729 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
/* When skipping (content faster than DCP) drop every other frame */
730 if (frc.skip && (video.frame % 2) == 1) {
734 /* Time of the first frame we will emit */
735 DCPTime const time = content_video_to_dcp (piece, video.frame);
737 /* Discard if it's before the content's period or the last accurate seek. We can't discard
738 if it's after the content's period here as in that case we still need to fill any gap between
739 `now' and the end of the content's period.
741 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
745 /* Fill gaps that we discover now that we have some video which needs to be emitted.
746 This is where we need to fill to.
748 DCPTime fill_to = min (time, piece->content->end());
750 if (_last_video_time) {
751 DCPTime fill_from = max (*_last_video_time, piece->content->position());
752 LastVideoMap::const_iterator last = _last_video.find (wp);
753 if (_film->three_d()) {
/* In 3D we must fill eye-by-eye, stopping at the right eye */
754 Eyes fill_to_eyes = video.eyes;
755 if (fill_to == piece->content->end()) {
756 /* Don't fill after the end of the content */
757 fill_to_eyes = EYES_LEFT;
759 DCPTime j = fill_from;
760 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
761 if (eyes == EYES_BOTH) {
764 while (j < fill_to || eyes != fill_to_eyes) {
765 if (last != _last_video.end()) {
/* Re-emit a copy of the last frame with the eye we need */
766 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
767 copy->set_eyes (eyes);
768 emit_video (copy, j);
770 emit_video (black_player_video_frame(eyes), j);
/* Only advance time once both eyes of a frame have been emitted */
772 if (eyes == EYES_RIGHT) {
773 j += one_video_frame();
775 eyes = increment_eyes (eyes);
/* 2D: fill with the last frame if we have one, otherwise black */
778 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
779 if (last != _last_video.end()) {
780 emit_video (last->second, j);
782 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this frame; kept so it can be re-used for gaps */
788 _last_video[wp].reset (
791 piece->content->video->crop (),
792 piece->content->video->fade (video.frame),
793 piece->content->video->scale().size (
794 piece->content->video, _video_container_size, _film->frame_size ()
796 _video_container_size,
799 piece->content->video->colour_conversion(),
/* Emit the frame, repeated as required by the frame-rate change */
806 for (int i = 0; i < frc.repeat; ++i) {
807 if (t < piece->content->end()) {
808 emit_video (_last_video[wp], t);
810 t += one_video_frame ();
/* Receive a block of audio from a decoder: trim it to the content's period,
   apply gain, remap to the film's channel layout, run the audio processor,
   then push it into the merger.
   NOTE(review): braces/returns are missing from this extract; comments
   annotate visible code only. */
815 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
817 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
819 shared_ptr<Piece> piece = wp.lock ();
824 shared_ptr<AudioContent> content = piece->content->audio;
825 DCPOMATIC_ASSERT (content);
827 /* Compute time in the DCP */
828 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
829 /* And the end of this block in the DCP */
830 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
832 /* Remove anything that comes before the start or after the end of the content */
833 if (time < piece->content->position()) {
834 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
836 /* This audio is entirely discarded */
839 content_audio.audio = cut.first;
841 } else if (time > piece->content->end()) {
/* Starts after the content ends: drop the whole block */
844 } else if (end > piece->content->end()) {
/* Overlaps the end: keep only the part inside the content's period */
845 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
846 if (remaining_frames == 0) {
849 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
850 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
851 content_audio.audio = cut;
854 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting, if any */
858 if (content->gain() != 0) {
859 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
860 gain->apply_gain (content->gain ());
861 content_audio.audio = gain;
/* Remap from the stream's channel layout to the film's */
866 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
870 if (_audio_processor) {
871 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push into the merger and note how far this stream has got */
876 _audio_merger.push (content_audio.audio, time);
877 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
878 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Receive the start of a bitmap subtitle from a decoder: apply the content's
   offset/scale settings and record it as active from the corresponding DCP time.
   NOTE(review): braces/returns and the PlayerText declaration are missing from
   this extract; comments annotate visible code only. */
882 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
884 shared_ptr<Piece> piece = wp.lock ();
885 shared_ptr<const TextContent> text = wc.lock ();
886 if (!piece || !text) {
890 /* Apply content's subtitle offsets */
891 subtitle.sub.rectangle.x += text->x_offset ();
892 subtitle.sub.rectangle.y += text->y_offset ();
894 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
895 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
896 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
898 /* Apply content's subtitle scale */
899 subtitle.sub.rectangle.width *= text->x_scale ();
900 subtitle.sub.rectangle.height *= text->y_scale ();
903 ps.bitmap.push_back (subtitle.sub);
904 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
906 _active_texts[subtitle.type()].add_from (wc, ps, from);
/* Receive the start of a plain-text (string) subtitle from a decoder: apply
   the content's offset/scale settings to each SubtitleString and record it
   as active from the corresponding DCP time.
   NOTE(review): braces/returns and the PlayerText declaration are missing
   from this extract; comments annotate visible code only. */
910 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
912 shared_ptr<Piece> piece = wp.lock ();
913 shared_ptr<const TextContent> text = wc.lock ();
914 if (!piece || !text) {
919 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore anything that would start after the content has finished */
921 if (from > piece->content->end()) {
925 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
926 s.set_h_position (s.h_position() + text->x_offset ());
927 s.set_v_position (s.v_position() + text->y_offset ());
928 float const xs = text->x_scale();
929 float const ys = text->y_scale();
930 float size = s.size();
932 /* Adjust size to express the common part of the scaling;
933 e.g. if xs = ys = 0.5 we scale size by 2.
935 if (xs > 1e-5 && ys > 1e-5) {
936 size *= 1 / min (1 / xs, 1 / ys);
940 /* Then express aspect ratio changes */
941 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
942 s.set_aspect_adjust (xs / ys);
/* dcp::Time here is expressed with an editable-rate of 1000 */
945 s.set_in (dcp::Time(from.seconds(), 1000));
946 ps.string.push_back (StringText (s, text->outline_width()));
947 ps.add_fonts (text->fonts ());
950 _active_texts[subtitle.type()].add_from (wc, ps, from);
/* Receive the end of a subtitle from a decoder: close the corresponding
   active text and, if the subtitle is not being burnt in, emit it via the
   Text signal.
   NOTE(review): braces/returns are missing from this extract; comments
   annotate visible code only. */
954 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to, TextType type)
/* A stop without a matching start: nothing to do */
956 if (!_active_texts[type].have (wc)) {
960 shared_ptr<Piece> piece = wp.lock ();
961 shared_ptr<const TextContent> text = wc.lock ();
962 if (!piece || !text) {
966 DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ignore stops after the content has finished */
968 if (dcp_to > piece->content->end()) {
972 pair<PlayerText, DCPTime> from = _active_texts[type].add_to (wc, dcp_to);
/* Burnt-in subtitles are composited onto video instead of being emitted */
974 bool const always = (type == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
975 if (text->use() && !always && !text->burn()) {
976 Text (from.first, type, DCPTimePeriod (from.second, dcp_to));
/* Seek the player to a DCP time.  If `accurate' is true the decoders are
   asked for frame-accurate positioning and the emission clocks are set so
   anything before `time' is discarded; otherwise the clocks are cleared.
   NOTE(review): braces and some lines (e.g. the suspended-state return and
   piece `done' flags) are missing from this extract. */
981 Player::seek (DCPTime time, bool accurate)
983 boost::mutex::scoped_lock lm (_mutex);
986 /* We can't seek in this state */
/* Drop any audio mid-flight through the processor */
996 if (_audio_processor) {
997 _audio_processor->flush ();
/* Buffered audio and active texts are invalid after a seek */
1000 _audio_merger.clear ();
1001 for (int i = 0; i < TEXT_COUNT; ++i) {
1002 _active_texts[i].clear ();
1005 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1006 if (time < i->content->position()) {
1007 /* Before; seek to the start of the content */
1008 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
1010 } else if (i->content->position() <= time && time < i->content->end()) {
1011 /* During; seek to position */
1012 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1015 /* After; this piece is done */
/* Accurate seek: discard anything emitted before `time' */
1021 _last_video_time = time;
1022 _last_video_eyes = EYES_LEFT;
1023 _last_audio_time = time;
/* Inaccurate seek: accept whatever the decoders produce first */
1025 _last_video_time = optional<DCPTime>();
1026 _last_video_eyes = optional<Eyes>();
1027 _last_audio_time = optional<DCPTime>();
1030 _black.set_position (time);
1031 _silent.set_position (time);
/* Cached last-frames are no longer valid for gap filling */
1033 _last_video.clear ();
1037 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1039 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1040 player before the video that requires them.
1042 _delay.push_back (make_pair (pv, time));
1044 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1045 _last_video_time = time + one_video_frame();
1047 _last_video_eyes = increment_eyes (pv->eyes());
1049 if (_delay.size() < 3) {
1053 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1055 do_emit_video (to_do.first, to_do.second);
1059 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1061 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1062 for (int i = 0; i < TEXT_COUNT; ++i) {
1063 _active_texts[i].clear_before (time);
1067 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1069 pv->set_text (subtitles.get ());
1076 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1078 /* Log if the assert below is about to fail */
1079 if (_last_audio_time && time != *_last_audio_time) {
1080 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1083 /* This audio must follow on from the previous */
1084 DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1086 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1090 Player::fill_audio (DCPTimePeriod period)
1092 if (period.from == period.to) {
1096 DCPOMATIC_ASSERT (period.from < period.to);
1098 DCPTime t = period.from;
1099 while (t < period.to) {
1100 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1101 Frame const samples = block.frames_round(_film->audio_frame_rate());
1103 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1104 silence->make_silent ();
1105 emit_audio (silence, t);
1112 Player::one_video_frame () const
1114 return DCPTime::from_frames (1, _film->video_frame_rate ());
1117 pair<shared_ptr<AudioBuffers>, DCPTime>
1118 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1120 DCPTime const discard_time = discard_to - time;
1121 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1122 Frame remaining_frames = audio->frames() - discard_frames;
1123 if (remaining_frames <= 0) {
1124 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1126 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1127 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1128 return make_pair(cut, time + discard_time);
/* Set the resolution reduction applied when decoding DCP content, rebuilding
   pieces if the value actually changes.
   NOTE(review): the unlock/return statements on the no-change path are
   missing from this extract. */
1132 Player::set_dcp_decode_reduction (optional<int> reduction)
1134 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1137 boost::mutex::scoped_lock lm (_mutex);
/* No change: tell observers the pending change was cancelled */
1139 if (reduction == _dcp_decode_reduction) {
1141 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1145 _dcp_decode_reduction = reduction;
1146 setup_pieces_unlocked ();
1149 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Public lookup: convert a time within a given piece of content to DCP time,
   returning nothing if the content is not (or no longer) in our pieces.
   NOTE(review): the return type line and braces are missing from this
   extract; the visible return suggests optional<DCPTime>. */
1153 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1155 boost::mutex::scoped_lock lm (_mutex);
1157 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1158 if (i->content == content) {
/* Delegate to the per-piece conversion */
1159 return content_time_to_dcp (i, t);
1163 /* We couldn't find this content; perhaps things are being changed over */
1164 return optional<DCPTime>();