2 Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
73 using boost::shared_ptr;
74 using boost::weak_ptr;
75 using boost::dynamic_pointer_cast;
76 using boost::optional;
77 using boost::scoped_ptr;
/* Property identifiers passed with the Player's Change signal so observers
   can tell which aspect of the player's output has changed. */
79 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
80 int const PlayerProperty::PLAYLIST = 701;
81 int const PlayerProperty::FILM_CONTAINER = 702;
82 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
83 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Construct a Player for @param film playing @param playlist.
   Wires up change signals from the film and playlist, sizes the video
   container from the film and performs an initial accurate seek to zero.
   NOTE(review): some constructor lines are elided in this view of the file. */
85 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
87 , _playlist (playlist)
89 , _ignore_video (false)
90 , _ignore_audio (false)
91 , _ignore_text (false)
92 , _always_burn_open_subtitles (false)
94 , _play_referenced (false)
/* The merger collects audio at the film's sample rate */
95 , _audio_merger (_film->audio_frame_rate())
98 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
99 /* The butler must hear about this first, so since we are proxying this through to the butler we must
/* at_front ensures our proxying connection runs before other observers */
102 _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
103 _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
104 set_video_container_size (_film->frame_size ());
/* Initialise the audio processor as if AUDIO_PROCESSOR had just changed */
106 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
/* Accurate seek to the very start */
109 seek (DCPTime (), true);
/* Rebuild the list of Pieces (content/decoder pairs), taking the state mutex first */
118 Player::setup_pieces ()
120 boost::mutex::scoped_lock lm (_mutex);
121 setup_pieces_unlocked ();
/* @return true if this piece has a decoder with a video part */
125 have_video (shared_ptr<Piece> piece)
127 return piece->decoder && piece->decoder->video;
/* @return true if this piece has a decoder with an audio part */
131 have_audio (shared_ptr<Piece> piece)
133 return piece->decoder && piece->decoder->audio;
/* Rebuild _pieces from the playlist's content; caller must hold _mutex.
   Creates a decoder per content item, connects decoder signals back into the
   Player, and resets the black/silent trackers and emission positions.
   NOTE(review): several lines of this function are elided in this view. */
137 Player::setup_pieces_unlocked ()
/* A Shuffler copes with 3D L/R video frames arriving out of sequence */
142 _shuffler = new Shuffler();
143 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
145 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose source files are missing or invalid */
147 if (!i->paths_valid ()) {
151 if (_ignore_video && _ignore_audio && i->text.empty()) {
152 /* We're only interested in text and this content has none */
156 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast);
157 FrameRateChange frc (_film, i);
160 /* Not something that we can decode; e.g. Atmos content */
/* Propagate the player's ignore flags into the freshly-made decoder */
164 if (decoder->video && _ignore_video) {
165 decoder->video->set_ignore (true);
168 if (decoder->audio && _ignore_audio) {
169 decoder->audio->set_ignore (true);
173 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
174 i->set_ignore (true);
/* DCP content may decode referenced assets, possibly at a forced reduction */
178 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
180 dcp->set_decode_referenced (_play_referenced);
181 if (_play_referenced) {
182 dcp->set_forced_reduction (_dcp_decode_reduction);
186 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
187 _pieces.push_back (piece);
189 if (decoder->video) {
190 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
191 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
192 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
194 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
198 if (decoder->audio) {
199 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Connect each text decoder's start/stop signals to our text handlers */
202 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
204 while (j != decoder->text.end()) {
205 (*j)->BitmapStart.connect (
206 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
208 (*j)->PlainStart.connect (
209 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
212 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
/* Record a StreamState per audio stream so pass() can track push progress */
219 _stream_states.clear ();
220 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
221 if (i->content->audio) {
222 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
223 _stream_states[j] = StreamState (i, i->content->position ());
/* Trackers for periods of the timeline with no video / no audio content */
228 _black = Empty (_film, _pieces, bind(&have_video, _1));
229 _silent = Empty (_film, _pieces, bind(&have_audio, _1));
/* Reset emission positions back to the start */
231 _last_video_time = DCPTime ();
232 _last_video_eyes = EYES_BOTH;
233 _last_audio_time = DCPTime ();
/* Handle a change to some content in the playlist.
   @param type PENDING / DONE / CANCELLED phase of the change.
   @param property which content property changed.
   @param frequent true if this change happens often (e.g. during a drag). */
237 Player::playlist_content_change (ChangeType type, int property, bool frequent)
239 if (type == CHANGE_TYPE_PENDING) {
240 boost::mutex::scoped_lock lm (_mutex);
241 /* The player content is probably about to change, so we can't carry on
242 until that has happened and we've rebuilt our pieces. Stop pass()
243 and seek() from working until then.
246 } else if (type == CHANGE_TYPE_DONE) {
247 /* A change in our content has gone through. Re-build our pieces. */
250 } else if (type == CHANGE_TYPE_CANCELLED) {
251 boost::mutex::scoped_lock lm (_mutex);
/* Pass the change on to our own observers */
255 Change (type, property, frequent);
/* Set the size of the container into which video will be composited. */
259 Player::set_video_container_size (dcp::Size s)
261 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
264 boost::mutex::scoped_lock lm (_mutex);
/* Unchanged size: emit CANCELLED so the earlier PENDING is balanced */
266 if (s == _video_container_size) {
268 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
272 _video_container_size = s;
/* Pre-build a black frame at the new size for gap filling */
274 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
275 _black_image->make_black ();
278 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Handle a change to the playlist as a whole and re-signal it as PLAYLIST */
282 Player::playlist_change (ChangeType type)
284 if (type == CHANGE_TYPE_DONE) {
287 Change (type, PlayerProperty::PLAYLIST, false);
/* Handle a change to a Film property that affects the player's output. */
291 Player::film_change (ChangeType type, Film::Property p)
293 /* Here we should notice Film properties that affect our output, and
294 alert listeners that our output now would be different to how it was
295 last time we were run.
298 if (p == Film::CONTAINER) {
299 Change (type, PlayerProperty::FILM_CONTAINER, false);
300 } else if (p == Film::VIDEO_FRAME_RATE) {
301 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
302 so we need new pieces here.
304 if (type == CHANGE_TYPE_DONE) {
307 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
308 } else if (p == Film::AUDIO_PROCESSOR) {
309 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
/* Clone a fresh processor instance at the film's audio rate */
310 boost::mutex::scoped_lock lm (_mutex);
311 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
313 } else if (p == Film::AUDIO_CHANNELS) {
314 if (type == CHANGE_TYPE_DONE) {
/* Channel count changed; pending merged audio is no longer valid */
315 boost::mutex::scoped_lock lm (_mutex);
316 _audio_merger.clear ();
/* @return a black PlayerVideo frame for @param eyes, sized to the video container */
321 shared_ptr<PlayerVideo>
322 Player::black_player_video_frame (Eyes eyes) const
324 return shared_ptr<PlayerVideo> (
/* Wrap the pre-built black image in a proxy */
326 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
329 _video_container_size,
330 _video_container_size,
333 PresetColourConversion::all().front().conversion,
/* No originating content or frame index for a synthetic black frame */
334 boost::weak_ptr<Content>(),
335 boost::optional<Frame>()
/* Convert a DCP timeline position to a video frame index within @param piece. */
341 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
343 DCPTime s = t - piece->content->position ();
/* Clamp to the trimmed length of the content, then account for trim_start */
344 s = min (piece->content->length_after_trim(_film), s);
345 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
347 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
348 then convert that ContentTime to frames at the content's rate. However this fails for
349 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
350 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
352 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
354 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Convert a video frame index in @param piece back to a DCP timeline position. */
358 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
360 /* See comment in dcp_to_content_video */
361 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
362 return d + piece->content->position();
/* Convert a DCP timeline position to a frame count at the film's audio rate. */
366 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
368 DCPTime s = t - piece->content->position ();
369 s = min (piece->content->length_after_trim(_film), s);
370 /* See notes in dcp_to_content_video */
371 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Convert a resampled-audio frame count back to a DCP timeline position. */
375 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
377 /* See comment in dcp_to_content_video */
378 return DCPTime::from_frames (f, _film->audio_frame_rate())
379 - DCPTime (piece->content->trim_start(), piece->frc)
380 + piece->content->position();
/* Convert a DCP timeline position to a ContentTime within @param piece. */
384 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
386 DCPTime s = t - piece->content->position ();
387 s = min (piece->content->length_after_trim(_film), s);
388 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Convert a ContentTime within @param piece to a DCP timeline position (clamped at zero). */
392 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
394 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* @return every Font used by any text content in the current pieces. */
397 list<shared_ptr<Font> >
398 Player::get_subtitle_fonts ()
400 boost::mutex::scoped_lock lm (_mutex);
402 list<shared_ptr<Font> > fonts;
403 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
404 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
405 /* XXX: things may go wrong if there are duplicate font IDs
406 with different font files.
408 list<shared_ptr<Font> > f = j->fonts ();
409 copy (f.begin(), f.end(), back_inserter (fonts));
416 /** Set this player never to produce any video data */
418 Player::set_ignore_video ()
420 boost::mutex::scoped_lock lm (_mutex);
421 _ignore_video = true;
/* Rebuild pieces so the flag reaches the decoders */
422 setup_pieces_unlocked ();
/* Set this player never to produce any audio data */
426 Player::set_ignore_audio ()
428 boost::mutex::scoped_lock lm (_mutex);
429 _ignore_audio = true;
/* Rebuild pieces so the flag reaches the decoders */
430 setup_pieces_unlocked ();
/* Set this player never to produce any text (subtitle/caption) data */
434 Player::set_ignore_text ()
436 boost::mutex::scoped_lock lm (_mutex);
/* Rebuild pieces so the flag reaches the decoders */
438 setup_pieces_unlocked ();
441 /** Set the player to always burn open texts into the image regardless of the content settings */
443 Player::set_always_burn_open_subtitles ()
445 boost::mutex::scoped_lock lm (_mutex);
446 _always_burn_open_subtitles = true;
449 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the signature line is elided here — presumably Player::set_fast();
   confirm against the full source. */
453 boost::mutex::scoped_lock lm (_mutex);
455 setup_pieces_unlocked ();
/* Set the player to decode content referenced by DCPs rather than skipping it */
459 Player::set_play_referenced ()
461 boost::mutex::scoped_lock lm (_mutex);
462 _play_referenced = true;
/* Rebuild pieces so DCP decoders pick up the new setting */
463 setup_pieces_unlocked ();
/* Trim reel asset @param r and append it to @param a if any duration remains.
   @param reel_trim_start frames to trim from the start of the asset
   @param reel_trim_end frames to trim from the end of the asset
   @param from DCP time at which the (trimmed) asset begins
   @param ffr film video frame rate */
467 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
469 DCPOMATIC_ASSERT (r);
/* Apply the trims directly to the asset's entry point and duration */
470 r->set_entry_point (r->entry_point() + reel_trim_start);
471 r->set_duration (r->duration() - reel_trim_start - reel_trim_end);
472 if (r->duration() > 0) {
474 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->duration(), ffr)))
/* @return assets from DCP content that are referenced (rather than re-encoded),
   trimmed and positioned on the film's timeline.
   NOTE(review): some lines of this function are elided in this view. */
479 list<ReferencedReelAsset>
480 Player::get_reel_assets ()
482 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
484 list<ReferencedReelAsset> a;
486 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can be referenced */
487 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
492 scoped_ptr<DCPDecoder> decoder;
494 decoder.reset (new DCPDecoder (_film, j, false));
499 DCPOMATIC_ASSERT (j->video_frame_rate ());
500 double const cfr = j->video_frame_rate().get();
/* Content trims expressed in frames at the content's rate */
501 Frame const trim_start = j->trim_start().frames_round (cfr);
502 Frame const trim_end = j->trim_end().frames_round (cfr);
503 int const ffr = _film->video_frame_rate ();
505 /* position in the asset from the start */
506 int64_t offset_from_start = 0;
507 /* position in the asset from the end */
508 int64_t offset_from_end = 0;
/* First pass: total duration, so offset_from_end can count down */
509 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
510 /* Assume that main picture duration is the length of the reel */
511 offset_from_end += k->main_picture()->duration();
514 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
516 /* Assume that main picture duration is the length of the reel */
517 int64_t const reel_duration = k->main_picture()->duration();
519 /* See doc/design/trim_reels.svg */
520 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
521 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
523 DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
/* Add each asset type that the content says should be referenced */
524 if (j->reference_video ()) {
525 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
528 if (j->reference_audio ()) {
529 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
532 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
533 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
536 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
537 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
538 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
/* Advance the running offsets for the next reel */
542 offset_from_start += reel_duration;
543 offset_from_end -= reel_duration;
/* NOTE(review): the signature of this function is elided in this view — it is
   presumably Player::pass(), which advances playback one step: pick the piece
   (or black/silent gap) farthest behind, make it emit data, then flush any
   audio that is definitely complete and any delayed video.  Confirm against
   the full source. */
553 boost::mutex::scoped_lock lm (_mutex);
556 /* We can't pass in this state */
560 if (_playlist->length(_film) == DCPTime()) {
561 /* Special case of an empty Film; just give one black frame */
562 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
566 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
568 shared_ptr<Piece> earliest_content;
569 optional<DCPTime> earliest_time;
571 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Where this piece's decoder currently is on the DCP timeline */
576 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
577 if (t > i->content->end(_film)) {
581 /* Given two choices at the same time, pick the one with texts so we see it before
584 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
586 earliest_content = i;
600 if (earliest_content) {
/* Black and silent gaps compete with content for which is earliest */
604 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
605 earliest_time = _black.position ();
609 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
610 earliest_time = _silent.position ();
/* Ask the chosen piece's decoder to emit something */
617 earliest_content->done = earliest_content->decoder->pass ();
618 shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
619 if (dcp && !_play_referenced && dcp->reference_audio()) {
620 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
621 to `hide' the fact that no audio was emitted during the referenced DCP (though
622 we need to behave as though it was).
624 _last_audio_time = dcp->end (_film);
/* Fill a black gap with one black frame and advance */
629 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
630 _black.set_position (_black.position() + one_video_frame());
/* Fill a silent gap with up to one video frame's worth of silence */
634 DCPTimePeriod period (_silent.period_at_position());
635 if (_last_audio_time) {
636 /* Sometimes the thing that happened last finishes fractionally before
637 or after this silence. Bodge the start time of the silence to fix it.
638 I think this is nothing to worry about since we will just add or
639 remove a little silence at the end of some content.
641 int64_t const error = labs(period.from.get() - _last_audio_time->get());
642 /* Let's not worry about less than a frame at 24fps */
643 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
644 if (error >= too_much_error) {
645 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
647 DCPOMATIC_ASSERT (error < too_much_error);
648 period.from = *_last_audio_time;
650 if (period.duration() > one_video_frame()) {
651 period.to = period.from + one_video_frame();
654 _silent.set_position (period.to);
662 /* Emit any audio that is ready */
664 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
665 of our streams, or the position of the _silent.
667 DCPTime pull_to = _film->length ();
668 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
669 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
670 pull_to = i->second.last_push_end;
673 if (!_silent.done() && _silent.position() < pull_to) {
674 pull_to = _silent.position();
/* Pull merged audio up to pull_to and emit it in order */
677 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
678 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
679 if (_last_audio_time && i->second < *_last_audio_time) {
680 /* This new data comes before the last we emitted (or the last seek); discard it */
681 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
686 } else if (_last_audio_time && i->second > *_last_audio_time) {
687 /* There's a gap between this data and the last we emitted; fill with silence */
688 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
691 emit_audio (i->first, i->second);
/* Flush the delayed-video queue */
696 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
697 do_emit_video(i->first, i->second);
704 /** @return Open subtitles for the frame at the given time, converted to images */
705 optional<PositionImage>
706 Player::open_subtitles_for_frame (DCPTime time) const
708 list<PositionImage> captions;
709 int const vfr = _film->video_frame_rate();
/* Collect active open subtitles over this one-frame period, honouring the
   always-burn setting */
713 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
716 /* Bitmap subtitles */
717 BOOST_FOREACH (BitmapText i, j.bitmap) {
722 /* i.image will already have been scaled to fit _video_container_size */
723 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
/* Convert the fractional rectangle position to pixels in the container */
729 lrint (_video_container_size.width * i.rectangle.x),
730 lrint (_video_container_size.height * i.rectangle.y)
736 /* String subtitles (rendered to an image) */
737 if (!j.string.empty ()) {
738 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
739 copy (s.begin(), s.end(), back_inserter (captions));
743 if (captions.empty ()) {
744 return optional<PositionImage> ();
/* Merge all caption images into a single positioned image */
747 return merge (captions);
/* Handler for video data arriving from a piece's decoder.
   Discards out-of-range frames, fills any gap since the last emitted video
   (re-using the previous frame or black, with 3D eye handling), then emits
   the new frame (repeated as required by the frame-rate change).
   NOTE(review): several lines of this function are elided in this view. */
751 Player::video (weak_ptr<Piece> wp, ContentVideo video)
753 shared_ptr<Piece> piece = wp.lock ();
/* Skip every other frame when the content rate requires it */
758 FrameRateChange frc (_film, piece->content);
759 if (frc.skip && (video.frame % 2) == 1) {
763 /* Time of the first frame we will emit */
764 DCPTime const time = content_video_to_dcp (piece, video.frame);
766 /* Discard if it's before the content's period or the last accurate seek. We can't discard
767 if it's after the content's period here as in that case we still need to fill any gap between
768 `now' and the end of the content's period.
770 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
774 /* Fill gaps that we discover now that we have some video which needs to be emitted.
775 This is where we need to fill to.
777 DCPTime fill_to = min (time, piece->content->end(_film));
779 if (_last_video_time) {
780 DCPTime fill_from = max (*_last_video_time, piece->content->position());
782 /* Fill if we have more than half a frame to do */
783 if ((fill_to - fill_from) > one_video_frame() / 2) {
784 LastVideoMap::const_iterator last = _last_video.find (wp);
785 if (_film->three_d()) {
/* In 3D, fill eye-by-eye up to the eye of the incoming frame */
786 Eyes fill_to_eyes = video.eyes;
787 if (fill_to_eyes == EYES_BOTH) {
788 fill_to_eyes = EYES_LEFT;
790 if (fill_to == piece->content->end(_film)) {
791 /* Don't fill after the end of the content */
792 fill_to_eyes = EYES_LEFT;
794 DCPTime j = fill_from;
795 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
796 if (eyes == EYES_BOTH) {
799 while (j < fill_to || eyes != fill_to_eyes) {
800 if (last != _last_video.end()) {
/* Re-use the last frame from this piece, re-tagged with the right eye */
801 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
802 copy->set_eyes (eyes);
803 emit_video (copy, j);
805 emit_video (black_player_video_frame(eyes), j);
/* Advance time only after the right eye has been emitted */
807 if (eyes == EYES_RIGHT) {
808 j += one_video_frame();
810 eyes = increment_eyes (eyes);
/* 2D: fill frame by frame with the last frame or black */
813 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
814 if (last != _last_video.end()) {
815 emit_video (last->second, j);
817 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this piece from the incoming content video */
824 _last_video[wp].reset (
827 piece->content->video->crop (),
828 piece->content->video->fade (_film, video.frame),
829 piece->content->video->scale().size (
830 piece->content->video, _video_container_size, _film->frame_size ()
832 _video_container_size,
835 piece->content->video->colour_conversion(),
/* Emit the frame, repeated as the frame-rate change requires */
842 for (int i = 0; i < frc.repeat; ++i) {
843 if (t < piece->content->end(_film)) {
844 emit_video (_last_video[wp], t);
846 t += one_video_frame ();
/* Handler for audio data arriving from a piece's decoder.
   Trims audio outside the content's period, applies gain, remapping and any
   audio processor, then pushes the result into the merger.
   NOTE(review): some lines of this function are elided in this view. */
851 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
853 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
855 shared_ptr<Piece> piece = wp.lock ();
860 shared_ptr<AudioContent> content = piece->content->audio;
861 DCPOMATIC_ASSERT (content);
863 /* Compute time in the DCP */
864 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
865 /* And the end of this block in the DCP */
866 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate(_film));
868 /* Remove anything that comes before the start or after the end of the content */
869 if (time < piece->content->position()) {
870 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
872 /* This audio is entirely discarded */
875 content_audio.audio = cut.first;
877 } else if (time > piece->content->end(_film)) {
880 } else if (end > piece->content->end(_film)) {
/* Block straddles the end of the content: keep only the part inside */
881 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(_film->audio_frame_rate());
882 if (remaining_frames == 0) {
885 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
886 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
887 content_audio.audio = cut;
890 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting, if any */
894 if (content->gain() != 0) {
895 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
896 gain->apply_gain (content->gain ());
897 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout */
902 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
906 if (_audio_processor) {
907 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push into the merger and record how far this stream has been pushed */
912 _audio_merger.push (content_audio.audio, time);
913 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
914 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for a bitmap subtitle starting: apply the content's offsets and
   scale, rescale the image to the container, and record it as active. */
918 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
920 shared_ptr<Piece> piece = wp.lock ();
921 shared_ptr<const TextContent> text = wc.lock ();
922 if (!piece || !text) {
926 /* Apply content's subtitle offsets */
927 subtitle.sub.rectangle.x += text->x_offset ();
928 subtitle.sub.rectangle.y += text->y_offset ();
930 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
931 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
932 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
934 /* Apply content's subtitle scale */
935 subtitle.sub.rectangle.width *= text->x_scale ();
936 subtitle.sub.rectangle.height *= text->y_scale ();
939 shared_ptr<Image> image = subtitle.sub.image;
941 /* We will scale the subtitle up to fit _video_container_size */
942 int const width = subtitle.sub.rectangle.width * _video_container_size.width;
943 int const height = subtitle.sub.rectangle.height * _video_container_size.height;
/* Degenerate (zero-area) subtitles are dropped */
944 if (width == 0 || height == 0) {
948 dcp::Size scaled_size (width, height);
949 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
950 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
/* Mark this text active from its start time */
952 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for a string subtitle starting: apply offsets/scales to each
   SubtitleString and record the result as active. */
956 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
958 shared_ptr<Piece> piece = wp.lock ();
959 shared_ptr<const TextContent> text = wc.lock ();
960 if (!piece || !text) {
965 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Subtitle starts after the content has ended: ignore it */
967 if (from > piece->content->end(_film)) {
971 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
972 s.set_h_position (s.h_position() + text->x_offset ());
973 s.set_v_position (s.v_position() + text->y_offset ());
974 float const xs = text->x_scale();
975 float const ys = text->y_scale();
976 float size = s.size();
978 /* Adjust size to express the common part of the scaling;
979 e.g. if xs = ys = 0.5 we scale size by 2.
981 if (xs > 1e-5 && ys > 1e-5) {
982 size *= 1 / min (1 / xs, 1 / ys);
986 /* Then express aspect ratio changes */
987 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
988 s.set_aspect_adjust (xs / ys);
/* Timecode the subtitle and collect it with its fonts */
991 s.set_in (dcp::Time(from.seconds(), 1000));
992 ps.string.push_back (StringText (s, text->outline_width()));
993 ps.add_fonts (text->fonts ());
996 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for a subtitle ending at content time @param to: close the active
   text and, if it is not burnt in, emit it via the Text signal. */
1000 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1002 shared_ptr<const TextContent> text = wc.lock ();
/* Nothing to stop if we never saw a matching start */
1007 if (!_active_texts[text->type()].have(wc)) {
1011 shared_ptr<Piece> piece = wp.lock ();
1016 DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Stop time beyond the content's end: ignore */
1018 if (dcp_to > piece->content->end(_film)) {
1022 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
/* Only emit as a separate text if it is not being burnt into the image */
1024 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1025 if (text->use() && !always && !text->burn()) {
1026 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/* Seek the player to @param time.
   @param accurate true to get exactly the frame at @param time; false to
   accept nearby frames (faster).
   NOTE(review): some lines of this function are elided in this view. */
1031 Player::seek (DCPTime time, bool accurate)
1033 boost::mutex::scoped_lock lm (_mutex);
1036 /* We can't seek in this state */
/* Discard any out-of-order 3D frames held by the shuffler */
1041 _shuffler->clear ();
1046 if (_audio_processor) {
1047 _audio_processor->flush ();
/* Drop pending merged audio and all active texts */
1050 _audio_merger.clear ();
1051 for (int i = 0; i < TEXT_COUNT; ++i) {
1052 _active_texts[i].clear ();
1055 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1056 if (time < i->content->position()) {
1057 /* Before; seek to the start of the content */
1058 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
1060 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1061 /* During; seek to position */
1062 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1065 /* After; this piece is done */
/* For an accurate seek, pin the emission positions to the seek time;
   otherwise leave them unset */
1071 _last_video_time = time;
1072 _last_video_eyes = EYES_LEFT;
1073 _last_audio_time = time;
1075 _last_video_time = optional<DCPTime>();
1076 _last_video_eyes = optional<Eyes>();
1077 _last_audio_time = optional<DCPTime>();
/* Move the black/silent trackers and forget remembered frames */
1080 _black.set_position (time);
1081 _silent.set_position (time);
1083 _last_video.clear ();
/* Queue a video frame for emission, delaying it slightly so subtitles that
   apply to it have time to arrive first. */
1087 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1089 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1090 player before the video that requires them.
1092 _delay.push_back (make_pair (pv, time));
/* Advance our notion of the last video time/eyes emitted */
1094 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1095 _last_video_time = time + one_video_frame();
1097 _last_video_eyes = increment_eyes (pv->eyes());
/* Keep a short queue; only emit once it is long enough */
1099 if (_delay.size() < 3) {
1103 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1105 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: attach any open subtitles for its time and
   clear out texts that have finished. */
1109 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1111 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1112 for (int i = 0; i < TEXT_COUNT; ++i) {
1113 _active_texts[i].clear_before (time);
/* Burn any open subtitles for this frame into the PlayerVideo */
1117 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1119 pv->set_text (subtitles.get ());
/* Emit an audio block via the Audio signal and advance _last_audio_time.
   Audio must follow on contiguously from the previous emission. */
1126 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1128 /* Log if the assert below is about to fail */
1129 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1130 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1133 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1134 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1135 Audio (data, time, _film->audio_frame_rate());
1136 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence covering @param period, in blocks of at most half a second. */
1140 Player::fill_audio (DCPTimePeriod period)
/* Empty period: nothing to do */
1142 if (period.from == period.to) {
1146 DCPOMATIC_ASSERT (period.from < period.to);
1148 DCPTime t = period.from;
1149 while (t < period.to) {
1150 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1151 Frame const samples = block.frames_round(_film->audio_frame_rate());
1153 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1154 silence->make_silent ();
1155 emit_audio (silence, t);
/* @return the duration of one video frame at the film's video frame rate */
1162 Player::one_video_frame () const
1164 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Discard the part of @param audio (starting at @param time) that falls
   before @param discard_to.
   @return the remaining audio and its new start time; a null buffer if
   everything was discarded. */
1167 pair<shared_ptr<AudioBuffers>, DCPTime>
1168 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1170 DCPTime const discard_time = discard_to - time;
1171 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1172 Frame remaining_frames = audio->frames() - discard_frames;
1173 if (remaining_frames <= 0) {
1174 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
/* Copy the surviving tail into a fresh buffer */
1176 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1177 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1178 return make_pair(cut, time + discard_time);
/* Set a resolution reduction for DCP decoding (none to decode at full size). */
1182 Player::set_dcp_decode_reduction (optional<int> reduction)
1184 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1187 boost::mutex::scoped_lock lm (_mutex);
/* Unchanged: emit CANCELLED to balance the earlier PENDING */
1189 if (reduction == _dcp_decode_reduction) {
1191 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1195 _dcp_decode_reduction = reduction;
/* Rebuild pieces so DCP decoders pick up the new reduction */
1196 setup_pieces_unlocked ();
1199 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Convert a ContentTime in @param content to a DCP timeline position, or
   nothing if the content is not (or no longer) in our pieces. */
1203 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1205 boost::mutex::scoped_lock lm (_mutex);
1207 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1208 if (i->content == content) {
1209 return content_time_to_dcp (i, t);
1213 /* We couldn't find this content; perhaps things are being changed over */
1214 return optional<DCPTime>();