2 Copyright (C) 2013-2019 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <dcp/reel_closed_caption_asset.h>
57 #include <boost/foreach.hpp>
/* Bring commonly-used boost smart-pointer names into scope (this code base
   uses boost::shared_ptr rather than std::shared_ptr). */
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79 using namespace dcpomatic;
/* Property identifiers passed with the Change signal so that observers can
   tell which aspect of the player's state changed. */
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Construct a Player for @param film with content from @param playlist.
   Connects to the film's and playlist's change signals, sets the video
   container size from the film and performs an initial accurate seek to
   time zero.  NOTE(review): some constructor lines are not visible in this
   excerpt. */
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 , _playlist (playlist)
91 , _ignore_video (false)
92 , _ignore_audio (false)
93 , _ignore_text (false)
94 , _always_burn_open_subtitles (false)
96 , _tolerant (film->tolerant())
97 , _play_referenced (false)
98 , _audio_merger (_film->audio_frame_rate())
101 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
102 /* The butler must hear about this first, so since we are proxying this through to the butler we must
/* at_front ensures this connection is called before other observers. */
105 _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
106 _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
107 set_video_container_size (_film->frame_size ());
/* Set up _audio_processor from the film's current setting. */
109 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
112 seek (DCPTime (), true);
/* Rebuild the list of Pieces, taking the state mutex first. */
121 Player::setup_pieces ()
123 boost::mutex::scoped_lock lm (_mutex);
124 setup_pieces_unlocked ();
/* @return true if this piece's decoder exists and can produce video. */
128 have_video (shared_ptr<Piece> piece)
130 return piece->decoder && piece->decoder->video;
/* @return true if this piece's decoder exists and can produce audio. */
134 have_audio (shared_ptr<Piece> piece)
136 return piece->decoder && piece->decoder->audio;
/* Rebuild _pieces from the playlist: create (or re-use) a decoder for each
   content item, connect its video/audio/text signals through to this Player,
   and reset the stream-state and empty-region bookkeeping.  The caller must
   already hold _mutex. */
140 Player::setup_pieces_unlocked ()
142 list<shared_ptr<Piece> > old_pieces = _pieces;
/* A Shuffler re-orders 3D left/right frames that arrive out of sequence. */
146 _shuffler = new Shuffler();
147 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
149 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files are missing. */
151 if (!i->paths_valid ()) {
155 if (_ignore_video && _ignore_audio && i->text.empty()) {
156 /* We're only interested in text and this content has none */
/* Re-use any previous decoder for this content so decoding state is kept. */
160 shared_ptr<Decoder> old_decoder;
161 BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
162 if (j->content == i) {
163 old_decoder = j->decoder;
168 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
169 FrameRateChange frc (_film, i);
172 /* Not something that we can decode; e.g. Atmos content */
176 if (decoder->video && _ignore_video) {
177 decoder->video->set_ignore (true);
180 if (decoder->audio && _ignore_audio) {
181 decoder->audio->set_ignore (true);
185 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
186 i->set_ignore (true);
/* DCP decoders need extra settings for playing referenced (VF) assets. */
190 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
192 dcp->set_decode_referenced (_play_referenced);
193 if (_play_referenced) {
194 dcp->set_forced_reduction (_dcp_decode_reduction);
198 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
199 _pieces.push_back (piece);
201 if (decoder->video) {
202 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
203 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
204 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
206 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
210 if (decoder->audio) {
211 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Connect each text decoder's start/stop signals to our handlers. */
214 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
216 while (j != decoder->text.end()) {
217 (*j)->BitmapStart.connect (
218 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
220 (*j)->PlainStart.connect (
221 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
224 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
/* Record the starting position of every audio stream in the playlist. */
231 _stream_states.clear ();
232 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
233 if (i->content->audio) {
234 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
235 _stream_states[j] = StreamState (i, i->content->position ());
/* Work out the regions of the timeline which have no video / no audio, so
   that pass() can fill them with black / silence. */
240 _black = Empty (_film, _pieces, bind(&have_video, _1));
241 _silent = Empty (_film, _pieces, bind(&have_audio, _1));
243 _last_video_time = DCPTime ();
244 _last_video_eyes = EYES_BOTH;
245 _last_audio_time = DCPTime ();
/* Handler for a change to some piece of content in the playlist.
   PENDING suspends pass()/seek() until the change has gone through;
   DONE rebuilds the pieces; CANCELLED resumes.  The Change signal is
   re-emitted to our own observers in all cases. */
249 Player::playlist_content_change (ChangeType type, int property, bool frequent)
251 if (type == CHANGE_TYPE_PENDING) {
252 boost::mutex::scoped_lock lm (_mutex);
253 /* The player content is probably about to change, so we can't carry on
254 until that has happened and we've rebuilt our pieces. Stop pass()
255 and seek() from working until then.
258 } else if (type == CHANGE_TYPE_DONE) {
259 /* A change in our content has gone through. Re-build our pieces. */
262 } else if (type == CHANGE_TYPE_CANCELLED) {
263 boost::mutex::scoped_lock lm (_mutex);
/* Forward the change to our own observers. */
267 Change (type, property, frequent);
/* Set the size of the "container" into which video will be scaled; emits
   PENDING/DONE (or CANCELLED when the size is unchanged) around the update
   and rebuilds the cached black frame at the new size. */
271 Player::set_video_container_size (dcp::Size s)
273 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
276 boost::mutex::scoped_lock lm (_mutex);
278 if (s == _video_container_size) {
280 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
284 _video_container_size = s;
/* Pre-render a black frame at the new container size. */
286 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
287 _black_image->make_black ();
290 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Handler for a change to the playlist itself (content added/removed etc.);
   re-emits the change as a PLAYLIST property change. */
294 Player::playlist_change (ChangeType type)
296 if (type == CHANGE_TYPE_DONE) {
299 Change (type, PlayerProperty::PLAYLIST, false);
/* Handler for a change to a Film property that affects our output;
   translates the Film property into the corresponding PlayerProperty
   and updates any dependent state (pieces, audio processor, merger). */
303 Player::film_change (ChangeType type, Film::Property p)
305 /* Here we should notice Film properties that affect our output, and
306 alert listeners that our output now would be different to how it was
307 last time we were run.
310 if (p == Film::CONTAINER) {
311 Change (type, PlayerProperty::FILM_CONTAINER, false);
312 } else if (p == Film::VIDEO_FRAME_RATE) {
313 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
314 so we need new pieces here.
316 if (type == CHANGE_TYPE_DONE) {
319 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
320 } else if (p == Film::AUDIO_PROCESSOR) {
321 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
322 boost::mutex::scoped_lock lm (_mutex);
/* Clone the processor so we have our own instance at the film's rate. */
323 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
325 } else if (p == Film::AUDIO_CHANNELS) {
326 if (type == CHANGE_TYPE_DONE) {
327 boost::mutex::scoped_lock lm (_mutex);
328 _audio_merger.clear ();
/* @return a PlayerVideo wrapping the cached black image, for the given
   @param eyes, at the current video container size. */
333 shared_ptr<PlayerVideo>
334 Player::black_player_video_frame (Eyes eyes) const
336 return shared_ptr<PlayerVideo> (
338 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
341 _video_container_size,
342 _video_container_size,
345 PresetColourConversion::all().front().conversion,
/* No originating content / frame for a synthesised black frame. */
347 boost::weak_ptr<Content>(),
348 boost::optional<Frame>()
/* Convert a DCP time @param t to a video frame index within @param piece's
   content, clamping to the content's trimmed length. */
354 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
356 DCPTime s = t - piece->content->position ();
357 s = min (piece->content->length_after_trim(_film), s);
358 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
360 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
361 then convert that ContentTime to frames at the content's rate. However this fails for
362 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
363 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
365 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
367 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Convert content video frame @param f of @param piece to a DCP time;
   inverse of dcp_to_content_video. */
371 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
373 /* See comment in dcp_to_content_video */
374 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
375 return d + piece->content->position();
/* Convert DCP time @param t to a frame index in @param piece's audio after
   resampling to the film's audio frame rate, clamped to the trimmed length. */
379 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
381 DCPTime s = t - piece->content->position ();
382 s = min (piece->content->length_after_trim(_film), s);
383 /* See notes in dcp_to_content_video */
384 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Convert resampled-audio frame @param f of @param piece to a DCP time;
   inverse of dcp_to_resampled_audio. */
388 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
390 /* See comment in dcp_to_content_video */
391 return DCPTime::from_frames (f, _film->audio_frame_rate())
392 - DCPTime (piece->content->trim_start(), piece->frc)
393 + piece->content->position();
/* Convert DCP time @param t to a ContentTime within @param piece, clamping
   to the content's trimmed length and never returning a negative time. */
397 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
399 DCPTime s = t - piece->content->position ();
400 s = min (piece->content->length_after_trim(_film), s);
401 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Convert ContentTime @param t within @param piece to a DCP time;
   inverse of dcp_to_content_time. */
405 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
407 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by every text (subtitle/caption) content in the
   playlist.  May contain duplicates; see XXX below about duplicate IDs. */
410 list<shared_ptr<Font> >
411 Player::get_subtitle_fonts ()
413 boost::mutex::scoped_lock lm (_mutex);
415 list<shared_ptr<Font> > fonts;
416 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
417 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
418 /* XXX: things may go wrong if there are duplicate font IDs
419 with different font files.
421 list<shared_ptr<Font> > f = j->fonts ();
422 copy (f.begin(), f.end(), back_inserter (fonts));
429 /** Set this player never to produce any video data */
431 Player::set_ignore_video ()
433 boost::mutex::scoped_lock lm (_mutex);
434 _ignore_video = true;
/* Rebuild pieces so decoders are created with video ignored. */
435 setup_pieces_unlocked ();
/* Set this player never to produce any audio data. */
439 Player::set_ignore_audio ()
441 boost::mutex::scoped_lock lm (_mutex);
442 _ignore_audio = true;
443 setup_pieces_unlocked ();
/* Set this player never to produce any text (subtitle/caption) data. */
447 Player::set_ignore_text ()
449 boost::mutex::scoped_lock lm (_mutex);
451 setup_pieces_unlocked ();
454 /** Set the player to always burn open texts into the image regardless of the content settings */
456 Player::set_always_burn_open_subtitles ()
458 boost::mutex::scoped_lock lm (_mutex);
459 _always_burn_open_subtitles = true;
462 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function signature is not visible in this excerpt;
   presumably this sets a _fast flag before rebuilding — confirm. */
466 boost::mutex::scoped_lock lm (_mutex);
468 setup_pieces_unlocked ();
/* Set the player to decode referenced (VF) DCP assets rather than
   skipping over them. */
472 Player::set_play_referenced ()
474 boost::mutex::scoped_lock lm (_mutex);
475 _play_referenced = true;
476 setup_pieces_unlocked ();
/* Trim reel asset @param r by @param reel_trim_start / @param reel_trim_end
   frames and, if anything remains, append it to @param a with its DCP-time
   extent starting at @param from (at film frame rate @param ffr). */
480 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
482 DCPOMATIC_ASSERT (r);
483 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
484 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
485 if (r->actual_duration() > 0) {
487 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/* @return the reel assets of any DCP content which is marked to be
   referenced (rather than re-encoded), each trimmed according to the
   content's trim settings and positioned on the film timeline. */
492 list<ReferencedReelAsset>
493 Player::get_reel_assets ()
495 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
497 list<ReferencedReelAsset> a;
499 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can be referenced. */
500 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
505 scoped_ptr<DCPDecoder> decoder;
507 decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
512 DCPOMATIC_ASSERT (j->video_frame_rate ());
513 double const cfr = j->video_frame_rate().get();
514 Frame const trim_start = j->trim_start().frames_round (cfr);
515 Frame const trim_end = j->trim_end().frames_round (cfr);
516 int const ffr = _film->video_frame_rate ();
518 /* position in the asset from the start */
519 int64_t offset_from_start = 0;
520 /* position in the asset from the end */
521 int64_t offset_from_end = 0;
/* First pass: total duration, so offset_from_end can count down. */
522 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
523 /* Assume that main picture duration is the length of the reel */
524 offset_from_end += k->main_picture()->actual_duration();
/* Second pass: work out per-reel trims and add referenced assets. */
527 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
529 /* Assume that main picture duration is the length of the reel */
530 int64_t const reel_duration = k->main_picture()->actual_duration();
532 /* See doc/design/trim_reels.svg */
533 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
534 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
536 DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
537 if (j->reference_video ()) {
538 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
541 if (j->reference_audio ()) {
542 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
545 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
546 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
549 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
550 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
551 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
555 offset_from_start += reel_duration;
556 offset_from_end -= reel_duration;
/* NOTE(review): the signature of this function is not visible in this
   excerpt; from the body it is the player's main pass() step: make the
   furthest-behind decoder (or the black/silent filler) emit some data,
   then flush any ready audio and delayed video. */
566 boost::mutex::scoped_lock lm (_mutex);
569 /* We can't pass in this state */
573 if (_playlist->length(_film) == DCPTime()) {
574 /* Special case of an empty Film; just give one black frame */
575 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
579 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
581 shared_ptr<Piece> earliest_content;
582 optional<DCPTime> earliest_time;
584 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
589 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
590 if (t > i->content->end(_film)) {
594 /* Given two choices at the same time, pick the one with texts so we see it before
597 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
599 earliest_content = i;
613 if (earliest_content) {
/* A black or silent filler region may be even further behind. */
617 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
618 earliest_time = _black.position ();
622 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
623 earliest_time = _silent.position ();
630 earliest_content->done = earliest_content->decoder->pass ();
631 shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
632 if (dcp && !_play_referenced && dcp->reference_audio()) {
633 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
634 to `hide' the fact that no audio was emitted during the referenced DCP (though
635 we need to behave as though it was).
637 _last_audio_time = dcp->end (_film);
/* Emit a black frame for a video-less region. */
642 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
643 _black.set_position (_black.position() + one_video_frame());
/* Emit silence for an audio-less region, at most one video frame's worth. */
647 DCPTimePeriod period (_silent.period_at_position());
648 if (_last_audio_time) {
649 /* Sometimes the thing that happened last finishes fractionally before
650 or after this silence. Bodge the start time of the silence to fix it.
652 DCPOMATIC_ASSERT (labs(period.from.get() - _last_audio_time->get()) < 2);
653 period.from = *_last_audio_time;
655 if (period.duration() > one_video_frame()) {
656 period.to = period.from + one_video_frame();
659 _silent.set_position (period.to);
667 /* Emit any audio that is ready */
669 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
670 of our streams, or the position of the _silent.
672 DCPTime pull_to = _film->length ();
673 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
674 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
675 pull_to = i->second.last_push_end;
678 if (!_silent.done() && _silent.position() < pull_to) {
679 pull_to = _silent.position();
682 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
683 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
684 if (_last_audio_time && i->second < *_last_audio_time) {
685 /* This new data comes before the last we emitted (or the last seek); discard it */
686 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
691 } else if (_last_audio_time && i->second > *_last_audio_time) {
692 /* There's a gap between this data and the last we emitted; fill with silence */
693 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
696 emit_audio (i->first, i->second);
/* Flush the delayed-video queue. */
701 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
702 do_emit_video(i->first, i->second);
709 /** @return Open subtitles for the frame at the given time, converted to images */
710 optional<PositionImage>
711 Player::open_subtitles_for_frame (DCPTime time) const
713 list<PositionImage> captions;
714 int const vfr = _film->video_frame_rate();
/* Gather all open subtitles which should be burnt in during this frame. */
718 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
721 /* Bitmap subtitles */
722 BOOST_FOREACH (BitmapText i, j.bitmap) {
727 /* i.image will already have been scaled to fit _video_container_size */
728 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
/* Position is expressed as a proportion of the container size. */
734 lrint (_video_container_size.width * i.rectangle.x),
735 lrint (_video_container_size.height * i.rectangle.y)
741 /* String subtitles (rendered to an image) */
742 if (!j.string.empty ()) {
743 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
744 copy (s.begin(), s.end(), back_inserter (captions));
748 if (captions.empty ()) {
749 return optional<PositionImage> ();
/* Merge all the caption images into a single positioned image. */
752 return merge (captions);
/* Handler for video data arriving from a piece's decoder (possibly via the
   Shuffler).  Converts the content frame to DCP time, discards anything
   before the piece/seek position, fills any gap since the last emitted
   frame (with a repeat of the last frame or with black), then emits the
   new frame (repeated as the FrameRateChange requires). */
756 Player::video (weak_ptr<Piece> wp, ContentVideo video)
758 shared_ptr<Piece> piece = wp.lock ();
763 FrameRateChange frc (_film, piece->content);
/* When skipping (content faster than DCP) drop every other frame. */
764 if (frc.skip && (video.frame % 2) == 1) {
768 /* Time of the first frame we will emit */
769 DCPTime const time = content_video_to_dcp (piece, video.frame);
771 /* Discard if it's before the content's period or the last accurate seek. We can't discard
772 if it's after the content's period here as in that case we still need to fill any gap between
773 `now' and the end of the content's period.
775 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
779 /* Fill gaps that we discover now that we have some video which needs to be emitted.
780 This is where we need to fill to.
782 DCPTime fill_to = min (time, piece->content->end(_film));
784 if (_last_video_time) {
785 DCPTime fill_from = max (*_last_video_time, piece->content->position());
787 /* Fill if we have more than half a frame to do */
788 if ((fill_to - fill_from) > one_video_frame() / 2) {
789 LastVideoMap::const_iterator last = _last_video.find (wp);
790 if (_film->three_d()) {
/* In 3D we must fill up to the correct eye as well as the correct time. */
791 Eyes fill_to_eyes = video.eyes;
792 if (fill_to_eyes == EYES_BOTH) {
793 fill_to_eyes = EYES_LEFT;
795 if (fill_to == piece->content->end(_film)) {
796 /* Don't fill after the end of the content */
797 fill_to_eyes = EYES_LEFT;
799 DCPTime j = fill_from;
800 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
801 if (eyes == EYES_BOTH) {
804 while (j < fill_to || eyes != fill_to_eyes) {
805 if (last != _last_video.end()) {
/* Repeat the last frame we saw, with the eye we need. */
806 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
807 copy->set_eyes (eyes);
808 emit_video (copy, j);
810 emit_video (black_player_video_frame(eyes), j);
/* Advance time only after the right eye has been emitted. */
812 if (eyes == EYES_RIGHT) {
813 j += one_video_frame();
815 eyes = increment_eyes (eyes);
818 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
819 if (last != _last_video.end()) {
820 emit_video (last->second, j);
822 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this frame, remembering it so gaps can be
   filled by repeating it later. */
829 _last_video[wp].reset (
832 piece->content->video->crop (),
833 piece->content->video->fade (_film, video.frame),
834 piece->content->video->scale().size (
835 piece->content->video, _video_container_size, _film->frame_size ()
837 _video_container_size,
840 piece->content->video->colour_conversion(),
841 piece->content->video->range(),
/* Emit the frame, repeated if the content rate is slower than the DCP. */
848 for (int i = 0; i < frc.repeat; ++i) {
849 if (t < piece->content->end(_film)) {
850 emit_video (_last_video[wp], t);
852 t += one_video_frame ();
/* Handler for audio data arriving from a piece's decoder: trims it to the
   content's period, applies gain, remapping and any audio processor, then
   pushes it into the merger and records how far this stream has got. */
857 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
859 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
861 shared_ptr<Piece> piece = wp.lock ();
866 shared_ptr<AudioContent> content = piece->content->audio;
867 DCPOMATIC_ASSERT (content);
869 int const rfr = content->resampled_frame_rate (_film);
871 /* Compute time in the DCP */
872 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
873 /* And the end of this block in the DCP */
874 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
876 /* Remove anything that comes before the start or after the end of the content */
877 if (time < piece->content->position()) {
878 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
880 /* This audio is entirely discarded */
883 content_audio.audio = cut.first;
885 } else if (time > piece->content->end(_film)) {
888 } else if (end > piece->content->end(_film)) {
/* Truncate the block so it finishes at the content's end. */
889 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
890 if (remaining_frames == 0) {
893 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
894 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
895 content_audio.audio = cut;
898 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting (dB). */
902 if (content->gain() != 0) {
903 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
904 gain->apply_gain (content->gain ());
905 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout. */
910 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
914 if (_audio_processor) {
915 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push into the merger and note how far this stream has been pushed. */
920 _audio_merger.push (content_audio.audio, time);
921 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
922 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for the start of a bitmap subtitle from a text decoder: applies
   the content's offset/scale settings, scales the image to the container
   size and records it as an active text from the corresponding DCP time. */
926 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
928 shared_ptr<Piece> piece = wp.lock ();
929 shared_ptr<const TextContent> text = wc.lock ();
930 if (!piece || !text) {
934 /* Apply content's subtitle offsets */
935 subtitle.sub.rectangle.x += text->x_offset ();
936 subtitle.sub.rectangle.y += text->y_offset ();
938 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
939 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
940 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
942 /* Apply content's subtitle scale */
943 subtitle.sub.rectangle.width *= text->x_scale ();
944 subtitle.sub.rectangle.height *= text->y_scale ();
947 shared_ptr<Image> image = subtitle.sub.image;
948 /* We will scale the subtitle up to fit _video_container_size */
949 dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
950 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
951 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
953 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for the start of a plain (string) subtitle from a text decoder:
   applies the content's position/scale settings to each SubtitleString and
   records the result as an active text from the corresponding DCP time. */
957 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
959 shared_ptr<Piece> piece = wp.lock ();
960 shared_ptr<const TextContent> text = wc.lock ();
961 if (!piece || !text) {
966 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore subtitles that start after the content has finished. */
968 if (from > piece->content->end(_film)) {
972 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
973 s.set_h_position (s.h_position() + text->x_offset ());
974 s.set_v_position (s.v_position() + text->y_offset ());
975 float const xs = text->x_scale();
976 float const ys = text->y_scale();
977 float size = s.size();
979 /* Adjust size to express the common part of the scaling;
980 e.g. if xs = ys = 0.5 we scale size by 2.
982 if (xs > 1e-5 && ys > 1e-5) {
983 size *= 1 / min (1 / xs, 1 / ys);
987 /* Then express aspect ratio changes */
988 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
989 s.set_aspect_adjust (xs / ys);
/* dcp::Time with editable units of 1000 per second. */
992 s.set_in (dcp::Time(from.seconds(), 1000));
993 ps.string.push_back (StringText (s, text->outline_width()));
994 ps.add_fonts (text->fonts ());
997 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for the end of a subtitle: closes the corresponding active text
   and, if the subtitle is not being burnt in, emits it via the Text signal. */
1001 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1003 shared_ptr<const TextContent> text = wc.lock ();
/* Nothing to stop if we never saw the corresponding start. */
1008 if (!_active_texts[text->type()].have(wc)) {
1012 shared_ptr<Piece> piece = wp.lock ();
1017 DCPTime const dcp_to = content_time_to_dcp (piece, to);
1019 if (dcp_to > piece->content->end(_film)) {
1023 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
/* Burnt-in texts are composited onto video instead of being emitted. */
1025 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1026 if (text->use() && !always && !text->burn()) {
1027 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/* Seek the player to @param time.  If @param accurate is true, decoding
   after the seek starts exactly at the requested time; otherwise it may
   start earlier (e.g. at the previous keyframe).  Clears all merger /
   active-text / delayed-video state accumulated before the seek. */
1032 Player::seek (DCPTime time, bool accurate)
1034 boost::mutex::scoped_lock lm (_mutex);
1037 /* We can't seek in this state */
1042 _shuffler->clear ();
1047 if (_audio_processor) {
1048 _audio_processor->flush ();
1051 _audio_merger.clear ();
1052 for (int i = 0; i < TEXT_COUNT; ++i) {
1053 _active_texts[i].clear ();
1056 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1057 if (time < i->content->position()) {
1058 /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
1059 we must seek this (following) content accurately, otherwise when we come to the end of the current
1060 content we may not start right at the beginning of the next, causing a gap (if the next content has
1061 been trimmed to a point between keyframes, or something).
1063 i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
1065 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1066 /* During; seek to position */
1067 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1070 /* After; this piece is done */
/* After an accurate seek we know exactly where output resumes; after an
   inaccurate one the resume times are unknown until data arrives. */
1076 _last_video_time = time;
1077 _last_video_eyes = EYES_LEFT;
1078 _last_audio_time = time;
1080 _last_video_time = optional<DCPTime>();
1081 _last_video_eyes = optional<Eyes>();
1082 _last_audio_time = optional<DCPTime>();
1085 _black.set_position (time);
1086 _silent.set_position (time);
1088 _last_video.clear ();
/* Queue video frame @param pv for emission at @param time.  Frames are
   buffered briefly (in _delay) and emitted via do_emit_video once enough
   have accumulated. */
1092 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1094 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1095 player before the video that requires them.
1097 _delay.push_back (make_pair (pv, time));
/* Advance the expected time only once both eyes (or a 2D frame) are in. */
1099 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1100 _last_video_time = time + one_video_frame();
1102 _last_video_eyes = increment_eyes (pv->eyes());
/* Keep a small backlog before actually emitting. */
1104 if (_delay.size() < 3) {
1108 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1110 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: retire expired active texts, burn in any
   open subtitles for this frame, and fire the Video signal. */
1114 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1116 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1117 for (int i = 0; i < TEXT_COUNT; ++i) {
1118 _active_texts[i].clear_before (time);
1122 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1124 pv->set_text (subtitles.get ());
/* Emit an audio block via the Audio signal and advance _last_audio_time.
   Audio must be contiguous: each block has to start where the previous one
   ended (within one sample at the film's audio rate). */
1131 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1133 /* Log if the assert below is about to fail */
1134 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1135 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1138 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1139 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1140 Audio (data, time, _film->audio_frame_rate());
1141 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence covering @param period, in blocks of at most 0.5 seconds. */
1145 Player::fill_audio (DCPTimePeriod period)
1147 if (period.from == period.to) {
1151 DCPOMATIC_ASSERT (period.from < period.to);
1153 DCPTime t = period.from;
1154 while (t < period.to) {
1155 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1156 Frame const samples = block.frames_round(_film->audio_frame_rate());
1158 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1159 silence->make_silent ();
1160 emit_audio (silence, t);
/* @return the duration of one video frame at the film's frame rate. */
1167 Player::one_video_frame () const
1169 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Discard the part of @param audio (starting at @param time) which falls
   before @param discard_to.  @return the remaining audio and its new start
   time, or a null buffer if everything was discarded. */
1172 pair<shared_ptr<AudioBuffers>, DCPTime>
1173 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1175 DCPTime const discard_time = discard_to - time;
1176 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1177 Frame remaining_frames = audio->frames() - discard_frames;
1178 if (remaining_frames <= 0) {
1179 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1181 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1182 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1183 return make_pair(cut, time + discard_time);
/* Set the JPEG2000 decode reduction to use for DCP content (none when
   @param reduction is unset).  Emits PENDING/DONE (or CANCELLED when the
   value is unchanged) and rebuilds the pieces. */
1187 Player::set_dcp_decode_reduction (optional<int> reduction)
1189 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1192 boost::mutex::scoped_lock lm (_mutex);
1194 if (reduction == _dcp_decode_reduction) {
1196 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1200 _dcp_decode_reduction = reduction;
1201 setup_pieces_unlocked ();
1204 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Convert @param t within @param content to a DCP time, or no value if the
   content is not currently in our pieces (e.g. mid-change). */
1208 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1210 boost::mutex::scoped_lock lm (_mutex);
1212 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1213 if (i->content == content) {
1214 return content_time_to_dcp (i, t);
1218 /* We couldn't find this content; perhaps things are being changed over */
1219 return optional<DCPTime>();