2 Copyright (C) 2013-2019 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <dcp/reel_closed_caption_asset.h>
57 #include <boost/foreach.hpp>
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79 using namespace dcpomatic;
/* NOTE(review): the leading integers on every line in this file look like
   line-number artifacts from extraction, not program text — confirm against
   the original source before building. */
/* Identifiers passed with the Player's Change signal so listeners can tell
   which aspect of the player's output has (or is about to) change. */
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Construct a Player over @param film and @param playlist: wire up change
   signals, build the initial pieces and seek to the start. */
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 , _playlist (playlist)
91 , _ignore_video (false)
92 , _ignore_audio (false)
93 , _ignore_text (false)
94 , _always_burn_open_subtitles (false)
96 , _tolerant (film->tolerant())
97 , _play_referenced (false)
98 , _audio_merger (_film->audio_frame_rate())
101 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
102 /* The butler must hear about this first, so since we are proxying this through to the butler we must
105 _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
106 _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
107 set_video_container_size (_film->frame_size ());
/* Pick up the Film's audio processor (if any) as though it had just been set */
109 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
/* Accurate seek to time zero so decoders start in a known state */
112 seek (DCPTime (), true);
/* Public wrapper: take the state mutex then rebuild our list of Pieces. */
121 Player::setup_pieces ()
123 boost::mutex::scoped_lock lm (_mutex);
124 setup_pieces_unlocked ();
/* Predicate: does this piece have a decoder that produces video?  Used to
   build the _black Empty below. */
128 have_video (shared_ptr<Piece> piece)
130 return piece->decoder && piece->decoder->video;
/* Predicate: does this piece have a decoder that produces audio?  Used to
   build the _silent Empty below. */
134 have_audio (shared_ptr<Piece> piece)
136 return piece->decoder && piece->decoder->audio;
/* Rebuild _pieces from the playlist: make a decoder per content item,
   connect decoder signals to our handlers, and reset playback state.
   Caller must hold _mutex. */
140 Player::setup_pieces_unlocked ()
/* Keep the old pieces so existing decoders can be re-used below */
142 list<shared_ptr<Piece> > old_pieces = _pieces;
146 _shuffler = new Shuffler();
147 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
149 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
151 if (!i->paths_valid ()) {
155 if (_ignore_video && _ignore_audio && i->text.empty()) {
156 /* We're only interested in text and this content has none */
/* Re-use the decoder from an old piece for the same content, if there was one */
160 shared_ptr<Decoder> old_decoder;
161 BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
162 if (j->content == i) {
163 old_decoder = j->decoder;
168 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
169 FrameRateChange frc (_film, i);
172 /* Not something that we can decode; e.g. Atmos content */
/* Apply the ignore flags to the sub-decoders that exist */
176 if (decoder->video && _ignore_video) {
177 decoder->video->set_ignore (true);
180 if (decoder->audio && _ignore_audio) {
181 decoder->audio->set_ignore (true);
185 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
186 i->set_ignore (true);
/* DCP content gets extra configuration for playing referenced assets */
190 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
192 dcp->set_decode_referenced (_play_referenced);
193 if (_play_referenced) {
194 dcp->set_forced_reduction (_dcp_decode_reduction);
198 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
199 _pieces.push_back (piece);
/* Connect decoder outputs to our handlers; a weak_ptr to the piece is bound
   so a destroyed piece does not keep delivering data */
201 if (decoder->video) {
202 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
203 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
204 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
206 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
210 if (decoder->audio) {
211 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
214 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
216 while (j != decoder->text.end()) {
217 (*j)->BitmapStart.connect (
218 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
220 (*j)->PlainStart.connect (
221 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
224 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
/* Rebuild the per-audio-stream state map from the new pieces */
231 _stream_states.clear ();
232 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
233 if (i->content->audio) {
234 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
235 _stream_states[j] = StreamState (i, i->content->position ());
/* Periods where we must generate black video / silent audio ourselves */
240 _black = Empty (_film, _pieces, bind(&have_video, _1));
241 _silent = Empty (_film, _pieces, bind(&have_audio, _1));
/* Reset the emission clocks to the start */
243 _last_video_time = DCPTime ();
244 _last_video_eyes = EYES_BOTH;
245 _last_audio_time = DCPTime ();
/* Handle a change to some piece of content in the playlist, then re-emit
   it to our own listeners via Change.
   @param type PENDING / DONE / CANCELLED phase of the change.
   @param property Which content property changed.
   @param frequent True for high-frequency changes (e.g. during a drag). */
249 Player::playlist_content_change (ChangeType type, int property, bool frequent)
251 if (type == CHANGE_TYPE_PENDING) {
252 boost::mutex::scoped_lock lm (_mutex);
253 /* The player content is probably about to change, so we can't carry on
254 until that has happened and we've rebuilt our pieces. Stop pass()
255 and seek() from working until then.
258 } else if (type == CHANGE_TYPE_DONE) {
259 /* A change in our content has gone through. Re-build our pieces. */
262 } else if (type == CHANGE_TYPE_CANCELLED) {
263 boost::mutex::scoped_lock lm (_mutex);
/* Forward the notification to our own listeners */
267 Change (type, property, frequent);
/* Set the size of the container into which output video will be scaled,
   following the PENDING -> DONE (or CANCELLED when unchanged) Change
   protocol.  Also rebuilds the cached black frame at the new size. */
271 Player::set_video_container_size (dcp::Size s)
273 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
276 boost::mutex::scoped_lock lm (_mutex);
278 if (s == _video_container_size) {
/* No actual change; tell listeners the pending change was cancelled */
280 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
284 _video_container_size = s;
/* Re-make the black frame used to fill gaps, at the new size */
286 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
287 _black_image->make_black ();
290 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Handle a change to the playlist as a whole; rebuild pieces when it is
   complete, and forward the notification. */
294 Player::playlist_change (ChangeType type)
296 if (type == CHANGE_TYPE_DONE) {
299 Change (type, PlayerProperty::PLAYLIST, false);
/* Handle a change to a Film property, translating the ones that affect our
   output into Player property change notifications. */
303 Player::film_change (ChangeType type, Film::Property p)
305 /* Here we should notice Film properties that affect our output, and
306 alert listeners that our output now would be different to how it was
307 last time we were run.
310 if (p == Film::CONTAINER) {
311 Change (type, PlayerProperty::FILM_CONTAINER, false);
312 } else if (p == Film::VIDEO_FRAME_RATE) {
313 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
314 so we need new pieces here.
316 if (type == CHANGE_TYPE_DONE) {
319 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
320 } else if (p == Film::AUDIO_PROCESSOR) {
321 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
322 boost::mutex::scoped_lock lm (_mutex);
/* Clone the processor at the film's audio rate so we have our own instance */
323 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
325 } else if (p == Film::AUDIO_CHANNELS) {
326 if (type == CHANGE_TYPE_DONE) {
327 boost::mutex::scoped_lock lm (_mutex);
/* Channel count changed, so any merged-but-unemitted audio is stale */
328 _audio_merger.clear ();
/* @return A new PlayerVideo wrapping our cached black image, for the given
   @param eyes, used to fill periods with no video content. */
333 shared_ptr<PlayerVideo>
334 Player::black_player_video_frame (Eyes eyes) const
336 return shared_ptr<PlayerVideo> (
338 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
341 _video_container_size,
342 _video_container_size,
345 PresetColourConversion::all().front().conversion,
/* No originating Content for synthesised black */
347 boost::weak_ptr<Content>()
/* Convert a DCP time @param t to a video frame index within @param piece's
   content, clamping to the piece's trimmed extent. */
353 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
355 DCPTime s = t - piece->content->position ();
356 s = min (piece->content->length_after_trim(_film), s);
357 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
359 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
360 then convert that ContentTime to frames at the content's rate. However this fails for
361 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
362 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
364 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
366 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: convert content video frame @param f of
   @param piece to a DCP time. */
370 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
372 /* See comment in dcp_to_content_video */
373 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
374 return d + piece->content->position();
/* Convert a DCP time @param t to a resampled-audio frame index within
   @param piece's content, at the film's audio frame rate. */
378 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
380 DCPTime s = t - piece->content->position ();
381 s = min (piece->content->length_after_trim(_film), s);
382 /* See notes in dcp_to_content_video */
383 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: convert resampled-audio frame @param f
   of @param piece to a DCP time. */
387 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
389 /* See comment in dcp_to_content_video */
390 return DCPTime::from_frames (f, _film->audio_frame_rate())
391 - DCPTime (piece->content->trim_start(), piece->frc)
392 + piece->content->position();
/* Convert a DCP time @param t to a ContentTime within @param piece,
   clamped to the piece's trimmed extent. */
396 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
398 DCPTime s = t - piece->content->position ();
399 s = min (piece->content->length_after_trim(_film), s);
400 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Convert ContentTime @param t within @param piece to a DCP time, clamped
   to be non-negative. */
404 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
406 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* @return All fonts used by all text content in our pieces, concatenated
   (duplicates are not removed — see the XXX below). */
409 list<shared_ptr<Font> >
410 Player::get_subtitle_fonts ()
412 boost::mutex::scoped_lock lm (_mutex);
414 list<shared_ptr<Font> > fonts;
415 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
416 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
417 /* XXX: things may go wrong if there are duplicate font IDs
418 with different font files.
420 list<shared_ptr<Font> > f = j->fonts ();
421 copy (f.begin(), f.end(), back_inserter (fonts));
428 /** Set this player never to produce any video data */
430 Player::set_ignore_video ()
432 boost::mutex::scoped_lock lm (_mutex);
433 _ignore_video = true;
/* Rebuild pieces so the flag reaches the decoders */
434 setup_pieces_unlocked ();
/* Set this player never to produce any audio data */
438 Player::set_ignore_audio ()
440 boost::mutex::scoped_lock lm (_mutex);
441 _ignore_audio = true;
/* Rebuild pieces so the flag reaches the decoders */
442 setup_pieces_unlocked ();
/* Set this player never to produce any text (subtitle/caption) data */
446 Player::set_ignore_text ()
448 boost::mutex::scoped_lock lm (_mutex);
450 setup_pieces_unlocked ();
453 /** Set the player to always burn open texts into the image regardless of the content settings */
455 Player::set_always_burn_open_subtitles ()
457 boost::mutex::scoped_lock lm (_mutex);
/* No piece rebuild needed: the flag is consulted at render time */
458 _always_burn_open_subtitles = true;
461 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the signature line is missing from this excerpt; presumably
   this is Player::set_fast() setting _fast — confirm against the original. */
465 boost::mutex::scoped_lock lm (_mutex);
467 setup_pieces_unlocked ();
/* Set the player to decode content from referenced DCPs rather than
   skipping it. */
471 Player::set_play_referenced ()
473 boost::mutex::scoped_lock lm (_mutex);
474 _play_referenced = true;
/* Rebuild pieces so DCP decoders get set_decode_referenced() */
475 setup_pieces_unlocked ();
/* Apply reel-level trims to @param r and, if anything of it remains, append
   it to @param a with its DCP period starting at @param from.
   @param reel_trim_start/@param reel_trim_end Frames to trim at each end.
   @param ffr Film video frame rate, used to compute the asset's period. */
479 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
481 DCPOMATIC_ASSERT (r);
/* Mutates the asset in place: push the entry point forward and shorten the duration */
482 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
483 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
484 if (r->actual_duration() > 0) {
486 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
/* @return Assets from DCP content which are marked to be referenced (rather
   than re-encoded), with content trims mapped onto per-reel trims. */
491 list<ReferencedReelAsset>
492 Player::get_reel_assets ()
494 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
496 list<ReferencedReelAsset> a;
498 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can be referenced */
499 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
504 scoped_ptr<DCPDecoder> decoder;
506 decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
511 DCPOMATIC_ASSERT (j->video_frame_rate ());
512 double const cfr = j->video_frame_rate().get();
/* Content trims expressed as frames at the content's rate */
513 Frame const trim_start = j->trim_start().frames_round (cfr);
514 Frame const trim_end = j->trim_end().frames_round (cfr);
515 int const ffr = _film->video_frame_rate ();
517 /* position in the asset from the start */
518 int64_t offset_from_start = 0;
519 /* position in the asset from the end */
520 int64_t offset_from_end = 0;
/* First pass: total duration, so offset_from_end can count down */
521 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
522 /* Assume that main picture duration is the length of the reel */
523 offset_from_end += k->main_picture()->actual_duration();
526 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
528 /* Assume that main picture duration is the length of the reel */
529 int64_t const reel_duration = k->main_picture()->actual_duration();
531 /* See doc/design/trim_reels.svg */
532 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
533 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
535 DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
/* Add each asset type that the content says should be referenced */
536 if (j->reference_video ()) {
537 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
540 if (j->reference_audio ()) {
541 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
544 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
545 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
548 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
549 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
550 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
554 offset_from_start += reel_duration;
555 offset_from_end -= reel_duration;
/* One step of playback: find whichever source (a piece's decoder, the
   _black filler or the _silent filler) is furthest behind, make it emit
   some data, then flush any audio that is complete up to a safe time.
   NOTE(review): the function signature line is missing from this excerpt. */
565 boost::mutex::scoped_lock lm (_mutex);
568 /* We can't pass in this state */
572 if (_playlist->length(_film) == DCPTime()) {
573 /* Special case of an empty Film; just give one black frame */
574 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
578 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
580 shared_ptr<Piece> earliest_content;
581 optional<DCPTime> earliest_time;
583 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
588 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
589 if (t > i->content->end(_film)) {
593 /* Given two choices at the same time, pick the one with texts so we see it before
596 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
598 earliest_content = i;
612 if (earliest_content) {
/* _black or _silent may be further behind than any piece */
616 if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
617 earliest_time = _black.position ();
621 if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
622 earliest_time = _silent.position ();
/* CONTENT case: ask the chosen piece's decoder for more data */
629 earliest_content->done = earliest_content->decoder->pass ();
630 shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
631 if (dcp && !_play_referenced && dcp->reference_audio()) {
632 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
633 to `hide' the fact that no audio was emitted during the referenced DCP (though
634 we need to behave as though it was).
636 _last_audio_time = dcp->end (_film);
/* BLACK case: emit one black frame and advance */
641 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
642 _black.set_position (_black.position() + one_video_frame());
/* SILENT case: emit up to one frame's worth of silence */
646 DCPTimePeriod period (_silent.period_at_position());
647 if (_last_audio_time) {
648 /* Sometimes the thing that happened last finishes fractionally before
649 or after this silence. Bodge the start time of the silence to fix it.
650 I think this is nothing to worry about since we will just add or
651 remove a little silence at the end of some content.
653 int64_t const error = labs(period.from.get() - _last_audio_time->get());
654 /* Let's not worry about less than a frame at 24fps */
655 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
656 if (error >= too_much_error) {
657 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
659 DCPOMATIC_ASSERT (error < too_much_error);
660 period.from = *_last_audio_time;
662 if (period.duration() > one_video_frame()) {
663 period.to = period.from + one_video_frame();
666 _silent.set_position (period.to);
674 /* Emit any audio that is ready */
676 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
677 of our streams, or the position of the _silent.
679 DCPTime pull_to = _film->length ();
680 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
681 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
682 pull_to = i->second.last_push_end;
685 if (!_silent.done() && _silent.position() < pull_to) {
686 pull_to = _silent.position();
689 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
690 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
691 if (_last_audio_time && i->second < *_last_audio_time) {
692 /* This new data comes before the last we emitted (or the last seek); discard it */
693 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
698 } else if (_last_audio_time && i->second > *_last_audio_time) {
699 /* There's a gap between this data and the last we emitted; fill with silence */
700 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
703 emit_audio (i->first, i->second);
/* Flush the video delay line */
708 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
709 do_emit_video(i->first, i->second);
716 /** @return Open subtitles for the frame at the given time, converted to images */
717 optional<PositionImage>
718 Player::open_subtitles_for_frame (DCPTime time) const
720 list<PositionImage> captions;
721 int const vfr = _film->video_frame_rate();
/* Gather the active open subtitles covering one video frame from `time' */
725 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
728 /* Bitmap subtitles */
729 BOOST_FOREACH (BitmapText i, j.bitmap) {
734 /* i.image will already have been scaled to fit _video_container_size */
735 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
/* Position is expressed as a proportion of the container size */
741 lrint (_video_container_size.width * i.rectangle.x),
742 lrint (_video_container_size.height * i.rectangle.y)
748 /* String subtitles (rendered to an image) */
749 if (!j.string.empty ()) {
750 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
751 copy (s.begin(), s.end(), back_inserter (captions));
755 if (captions.empty ()) {
756 return optional<PositionImage> ();
/* Flatten all the caption images into one */
759 return merge (captions);
/* Handler for video data arriving from a piece's decoder: discard
   out-of-range frames, fill any gap since the last emitted frame (with a
   repeat of the previous frame or black), then emit this frame. */
763 Player::video (weak_ptr<Piece> wp, ContentVideo video)
765 shared_ptr<Piece> piece = wp.lock ();
770 FrameRateChange frc (_film, piece->content);
/* When skipping (content rate > DCP rate) drop every other frame */
771 if (frc.skip && (video.frame % 2) == 1) {
775 /* Time of the first frame we will emit */
776 DCPTime const time = content_video_to_dcp (piece, video.frame);
778 /* Discard if it's before the content's period or the last accurate seek. We can't discard
779 if it's after the content's period here as in that case we still need to fill any gap between
780 `now' and the end of the content's period.
782 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
786 /* Fill gaps that we discover now that we have some video which needs to be emitted.
787 This is where we need to fill to.
789 DCPTime fill_to = min (time, piece->content->end(_film));
791 if (_last_video_time) {
792 DCPTime fill_from = max (*_last_video_time, piece->content->position());
794 /* Fill if we have more than half a frame to do */
795 if ((fill_to - fill_from) > one_video_frame() / 2) {
796 LastVideoMap::const_iterator last = _last_video.find (wp);
797 if (_film->three_d()) {
/* 3D: fill eye-by-eye so L/R stay in step */
798 Eyes fill_to_eyes = video.eyes;
799 if (fill_to_eyes == EYES_BOTH) {
800 fill_to_eyes = EYES_LEFT;
802 if (fill_to == piece->content->end(_film)) {
803 /* Don't fill after the end of the content */
804 fill_to_eyes = EYES_LEFT;
806 DCPTime j = fill_from;
807 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
808 if (eyes == EYES_BOTH) {
811 while (j < fill_to || eyes != fill_to_eyes) {
812 if (last != _last_video.end()) {
/* Repeat the last-seen frame with the required eye */
813 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
814 copy->set_eyes (eyes);
815 emit_video (copy, j);
817 emit_video (black_player_video_frame(eyes), j);
/* Advance time only once both eyes have been emitted */
819 if (eyes == EYES_RIGHT) {
820 j += one_video_frame();
822 eyes = increment_eyes (eyes);
/* 2D: fill frame-by-frame with the last frame or black */
825 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
826 if (last != _last_video.end()) {
827 emit_video (last->second, j);
829 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Remember this frame (wrapped as a PlayerVideo) so it can be repeated later */
836 _last_video[wp].reset (
839 piece->content->video->crop (),
840 piece->content->video->fade (_film, video.frame),
841 piece->content->video->scale().size (
842 piece->content->video, _video_container_size, _film->frame_size ()
844 _video_container_size,
847 piece->content->video->colour_conversion(),
848 piece->content->video->range(),
/* Emit the frame, repeated if the FrameRateChange requires it */
854 for (int i = 0; i < frc.repeat; ++i) {
855 if (t < piece->content->end(_film)) {
856 emit_video (_last_video[wp], t);
858 t += one_video_frame ();
/* Handler for audio data arriving from a piece's decoder: trim it to the
   content's period, apply gain / remap / processing, then push it into the
   merger and record how far this stream has got. */
863 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
865 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
867 shared_ptr<Piece> piece = wp.lock ();
872 shared_ptr<AudioContent> content = piece->content->audio;
873 DCPOMATIC_ASSERT (content);
875 int const rfr = content->resampled_frame_rate (_film);
877 /* Compute time in the DCP */
878 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
879 /* And the end of this block in the DCP */
880 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
882 /* Remove anything that comes before the start or after the end of the content */
883 if (time < piece->content->position()) {
884 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
886 /* This audio is entirely discarded */
889 content_audio.audio = cut.first;
891 } else if (time > piece->content->end(_film)) {
894 } else if (end > piece->content->end(_film)) {
/* Block straddles the end of the content; keep only the in-range part */
895 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
896 if (remaining_frames == 0) {
899 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
900 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
901 content_audio.audio = cut;
904 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting, working on a copy of the buffers */
908 if (content->gain() != 0) {
909 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
910 gain->apply_gain (content->gain ());
911 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout */
916 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
920 if (_audio_processor) {
921 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
926 _audio_merger.push (content_audio.audio, time);
927 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
/* last_push_end lets pass() know up to when this stream's audio is complete */
928 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for a bitmap subtitle starting: apply the content's offsets and
   scale, rasterise to container size and register it as active. */
932 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
934 shared_ptr<Piece> piece = wp.lock ();
935 shared_ptr<const TextContent> text = wc.lock ();
936 if (!piece || !text) {
940 /* Apply content's subtitle offsets */
941 subtitle.sub.rectangle.x += text->x_offset ();
942 subtitle.sub.rectangle.y += text->y_offset ();
944 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
945 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
946 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
948 /* Apply content's subtitle scale */
949 subtitle.sub.rectangle.width *= text->x_scale ();
950 subtitle.sub.rectangle.height *= text->y_scale ();
953 shared_ptr<Image> image = subtitle.sub.image;
954 /* We will scale the subtitle up to fit _video_container_size */
955 dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
956 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
957 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
/* Register as active; the matching stop arrives via subtitle_stop() */
959 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for a string (plain-text) subtitle starting: apply the content's
   position/scale settings to each SubtitleString and register it as active. */
963 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
965 shared_ptr<Piece> piece = wp.lock ();
966 shared_ptr<const TextContent> text = wc.lock ();
967 if (!piece || !text) {
972 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore subtitles which start after the content has finished */
974 if (from > piece->content->end(_film)) {
978 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
979 s.set_h_position (s.h_position() + text->x_offset ());
980 s.set_v_position (s.v_position() + text->y_offset ());
981 float const xs = text->x_scale();
982 float const ys = text->y_scale();
983 float size = s.size();
985 /* Adjust size to express the common part of the scaling;
986 e.g. if xs = ys = 0.5 we scale size by 2.
988 if (xs > 1e-5 && ys > 1e-5) {
989 size *= 1 / min (1 / xs, 1 / ys);
993 /* Then express aspect ratio changes */
994 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
995 s.set_aspect_adjust (xs / ys);
/* Rebase the subtitle's in-time to the DCP timeline (editable granularity 1000) */
998 s.set_in (dcp::Time(from.seconds(), 1000));
999 ps.string.push_back (StringText (s, text->outline_width()));
1000 ps.add_fonts (text->fonts ());
1003 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for a subtitle finishing at ContentTime @param to: close the
   active text and, if it is not to be burnt in, emit it via the Text signal. */
1007 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1009 shared_ptr<const TextContent> text = wc.lock ();
/* No matching start was registered; nothing to stop */
1014 if (!_active_texts[text->type()].have(wc)) {
1018 shared_ptr<Piece> piece = wp.lock ();
1023 DCPTime const dcp_to = content_time_to_dcp (piece, to);
1025 if (dcp_to > piece->content->end(_film)) {
1029 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1031 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
/* Burnt-in text is rendered into the video instead of being emitted here */
1032 if (text->use() && !always && !text->burn()) {
1033 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/* Seek the player to DCP time @param time.
   @param accurate True to get frames at exactly `time' onwards; false
   allows decoders to start at a convenient earlier point (e.g. a keyframe). */
1038 Player::seek (DCPTime time, bool accurate)
1040 boost::mutex::scoped_lock lm (_mutex);
1043 /* We can't seek in this state */
1048 _shuffler->clear ();
/* Drop all buffered/derived state from the previous position */
1053 if (_audio_processor) {
1054 _audio_processor->flush ();
1057 _audio_merger.clear ();
1058 for (int i = 0; i < TEXT_COUNT; ++i) {
1059 _active_texts[i].clear ();
1062 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1063 if (time < i->content->position()) {
1064 /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
1065 we must seek this (following) content accurately, otherwise when we come to the end of the current
1066 content we may not start right at the beginning of the next, causing a gap (if the next content has
1067 been trimmed to a point between keyframes, or something).
1069 i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
1071 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1072 /* During; seek to position */
1073 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1076 /* After; this piece is done */
/* For an accurate seek the emission clocks restart exactly at `time';
   otherwise they are unset until the first data arrives */
1082 _last_video_time = time;
1083 _last_video_eyes = EYES_LEFT;
1084 _last_audio_time = time;
1086 _last_video_time = optional<DCPTime>();
1087 _last_video_eyes = optional<Eyes>();
1088 _last_audio_time = optional<DCPTime>();
1091 _black.set_position (time);
1092 _silent.set_position (time);
1094 _last_video.clear ();
/* Queue video frame @param pv for emission at @param time, via a small
   delay line so subtitles have time to arrive first. */
1098 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1100 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1101 player before the video that requires them.
1103 _delay.push_back (make_pair (pv, time));
/* Advance the video clock once a whole frame (both eyes in 3D) is queued */
1105 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1106 _last_video_time = time + one_video_frame();
1108 _last_video_eyes = increment_eyes (pv->eyes());
/* Only start emitting once the delay line holds a few frames */
1110 if (_delay.size() < 3) {
1114 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1116 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: expire old active texts, burn in any open
   subtitles for this frame, then signal the frame out. */
1120 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1122 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1123 for (int i = 0; i < TEXT_COUNT; ++i) {
1124 _active_texts[i].clear_before (time);
1128 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1130 pv->set_text (subtitles.get ());
/* Emit audio @param data at @param time via the Audio signal and advance
   the audio clock.  Audio must be (near-)contiguous with the previous emit. */
1137 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1139 /* Log if the assert below is about to fail */
1140 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1141 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1144 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1145 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1146 Audio (data, time, _film->audio_frame_rate());
1147 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence covering @param period, in blocks of at most half a second. */
1151 Player::fill_audio (DCPTimePeriod period)
1153 if (period.from == period.to) {
1157 DCPOMATIC_ASSERT (period.from < period.to);
1159 DCPTime t = period.from;
1160 while (t < period.to) {
1161 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1162 Frame const samples = block.frames_round(_film->audio_frame_rate());
1164 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1165 silence->make_silent ();
1166 emit_audio (silence, t);
/* @return The duration of one video frame at the film's frame rate. */
1173 Player::one_video_frame () const
1175 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Drop the part of @param audio (starting at @param time) which lies before
   @param discard_to.
   @return The remaining buffers and their new start time; a null buffer
   pointer if everything was discarded. */
1178 pair<shared_ptr<AudioBuffers>, DCPTime>
1179 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1181 DCPTime const discard_time = discard_to - time;
1182 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1183 Frame remaining_frames = audio->frames() - discard_frames;
1184 if (remaining_frames <= 0) {
1185 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1187 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1188 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1189 return make_pair(cut, time + discard_time);
/* Set the resolution reduction used when decoding DCP content (boost::none
   for full resolution), following the PENDING -> DONE/CANCELLED protocol. */
1193 Player::set_dcp_decode_reduction (optional<int> reduction)
1195 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1198 boost::mutex::scoped_lock lm (_mutex);
1200 if (reduction == _dcp_decode_reduction) {
/* No actual change */
1202 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1206 _dcp_decode_reduction = reduction;
/* Rebuild so DCP decoders pick up the new reduction */
1207 setup_pieces_unlocked ();
1210 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Public overload: convert ContentTime @param t within @param content to a
   DCP time, or boost::none if the content is not in our pieces. */
1214 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1216 boost::mutex::scoped_lock lm (_mutex);
1218 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1219 if (i->content == content) {
/* Delegate to the per-piece conversion */
1220 return content_time_to_dcp (i, t);
1224 /* We couldn't find this content; perhaps things are being changed over */
1225 return optional<DCPTime>();