2 Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
73 using boost::shared_ptr;
74 using boost::weak_ptr;
75 using boost::dynamic_pointer_cast;
76 using boost::optional;
77 using boost::scoped_ptr;
/* Property codes emitted through the Change signal so that observers can tell
   which aspect of the player's output has (or is about to have) changed. */
79 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
80 int const PlayerProperty::PLAYLIST = 701;
81 int const PlayerProperty::FILM_CONTAINER = 702;
82 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
83 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Construct a Player for the given film and playlist.  Wires up change signals,
   sets the initial video container size and performs an initial accurate seek to
   time zero.  NOTE(review): several initialiser-list lines (e.g. _film, and
   presumably _fast/_suspended) appear to be missing from this extract — confirm
   against the full source. */
85 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
87 , _playlist (playlist)
89 , _ignore_video (false)
90 , _ignore_audio (false)
91 , _ignore_text (false)
92 , _always_burn_open_subtitles (false)
94 , _play_referenced (false)
/* The merger is created at the film's audio rate; film_change() below clears it on rate-affecting changes */
95 , _audio_merger (_film->audio_frame_rate())
98 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
99 /* The butler must hear about this first, so since we are proxying this through to the butler we must
/* at_front ensures our playlist_change handler runs before other observers of the same signal */
102 _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
103 _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
104 set_video_container_size (_film->frame_size ());
/* Kick the AUDIO_PROCESSOR handler once so _audio_processor is set up from the film's current state */
106 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
/* Accurate seek to the start so decoder state is well-defined before the first pass() */
109 seek (DCPTime (), true);
/* Public entry point: take the state mutex and rebuild the piece list. */
118 Player::setup_pieces ()
120 boost::mutex::scoped_lock lm (_mutex);
121 setup_pieces_unlocked ();
/* Predicate used with Empty: true if this piece has a decoder with a video part. */
125 have_video (shared_ptr<Piece> piece)
127 return piece->decoder && piece->decoder->video;
/* Predicate used with Empty: true if this piece has a decoder with an audio part. */
131 have_audio (shared_ptr<Piece> piece)
133 return piece->decoder && piece->decoder->audio;
/* Rebuild _pieces from the playlist: create a decoder per content item, connect
   its Data/text signals to our handlers, rebuild the audio stream-state map and
   the black/silent Empty trackers, and reset the last-emitted-time bookkeeping.
   Caller must hold _mutex.  NOTE(review): this extract drops a number of lines
   (e.g. clearing of _pieces, `continue` statements and closing braces) — do not
   treat the visible control flow as complete. */
137 Player::setup_pieces_unlocked ()
/* A fresh Shuffler each time; it re-orders 3D left/right-eye frames for us */
142 _shuffler = new Shuffler();
143 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
145 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files have gone missing */
147 if (!i->paths_valid ()) {
151 if (_ignore_video && _ignore_audio && i->text.empty()) {
152 /* We're only interested in text and this content has none */
156 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast);
157 FrameRateChange frc (_film, i);
160 /* Not something that we can decode; e.g. Atmos content */
164 if (decoder->video && _ignore_video) {
165 decoder->video->set_ignore (true);
168 if (decoder->audio && _ignore_audio) {
169 decoder->audio->set_ignore (true);
/* Presumably guarded by _ignore_text in a dropped line — confirm against full source */
173 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
174 i->set_ignore (true);
/* DCP content may refer to assets in another DCP rather than decoding them itself */
178 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
180 dcp->set_decode_referenced (_play_referenced);
181 if (_play_referenced) {
182 dcp->set_forced_reduction (_dcp_decode_reduction);
186 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
187 _pieces.push_back (piece);
/* Connect decoder outputs to our handlers, holding pieces only weakly so they can be rebuilt */
189 if (decoder->video) {
190 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
191 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
192 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
194 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
198 if (decoder->audio) {
199 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
202 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
204 while (j != decoder->text.end()) {
205 (*j)->BitmapStart.connect (
206 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
208 (*j)->PlainStart.connect (
209 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
/* Stop handler is shared by bitmap and plain subtitles */
212 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
/* Track last-pushed-audio positions per stream, keyed on the stream pointer */
219 _stream_states.clear ();
220 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
221 if (i->content->audio) {
222 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
223 _stream_states[j] = StreamState (i, i->content->position ());
/* Empty trackers describe the periods with no video (black) / no audio (silence) */
228 _black = Empty (_film, _pieces, bind(&have_video, _1));
229 _silent = Empty (_film, _pieces, bind(&have_audio, _1));
231 _last_video_time = DCPTime ();
232 _last_video_eyes = EYES_BOTH;
233 _last_audio_time = DCPTime ();
/* Handle a change to some content in the playlist: suspend around PENDING,
   rebuild pieces on DONE, and re-emit the change to our own observers.
   NOTE(review): the suspend/resume/setup calls inside the branches are among
   the lines dropped from this extract. */
237 Player::playlist_content_change (ChangeType type, int property, bool frequent)
239 if (type == CHANGE_TYPE_PENDING) {
240 /* The player content is probably about to change, so we can't carry on
241 until that has happened and we've rebuilt our pieces. Stop pass()
242 and seek() from working until then.
245 } else if (type == CHANGE_TYPE_DONE) {
246 /* A change in our content has gone through. Re-build our pieces. */
249 } else if (type == CHANGE_TYPE_CANCELLED) {
/* Proxy the notification on to our own Change observers (e.g. the butler) */
253 Change (type, property, frequent);
/* Set the size into which video is letterboxed/scaled.  Emits PENDING first,
   CANCELLED if the size is unchanged, otherwise rebuilds the black frame and
   emits DONE.  Note the mutex is released (scope ends) before DONE is emitted. */
257 Player::set_video_container_size (dcp::Size s)
259 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
262 boost::mutex::scoped_lock lm (_mutex);
/* No-op: tell observers the pending change came to nothing */
264 if (s == _video_container_size) {
266 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
270 _video_container_size = s;
/* Pre-render a reusable black frame at the new size */
272 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
273 _black_image->make_black ();
276 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Playlist-level change (content added/removed/moved): rebuild pieces when the
   change has completed, then forward it as a PLAYLIST property change. */
280 Player::playlist_change (ChangeType type)
282 if (type == CHANGE_TYPE_DONE) {
285 Change (type, PlayerProperty::PLAYLIST, false);
/* React to Film property changes that affect our output, mapping them onto our
   own PlayerProperty codes and refreshing dependent state (pieces, audio
   processor, audio merger). */
289 Player::film_change (ChangeType type, Film::Property p)
291 /* Here we should notice Film properties that affect our output, and
292 alert listeners that our output now would be different to how it was
293 last time we were run.
296 if (p == Film::CONTAINER) {
297 Change (type, PlayerProperty::FILM_CONTAINER, false);
298 } else if (p == Film::VIDEO_FRAME_RATE) {
299 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
300 so we need new pieces here.
302 if (type == CHANGE_TYPE_DONE) {
305 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
306 } else if (p == Film::AUDIO_PROCESSOR) {
/* Clone the processor at the film's rate so we have our own instance to run */
307 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
308 boost::mutex::scoped_lock lm (_mutex);
309 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
311 } else if (p == Film::AUDIO_CHANNELS) {
/* Channel count changed: pending merged audio is no longer valid */
312 if (type == CHANGE_TYPE_DONE) {
313 boost::mutex::scoped_lock lm (_mutex);
314 _audio_merger.clear ();
/* Make a PlayerVideo wrapping the pre-rendered black image, for the given eyes,
   at the current container size.  NOTE(review): several constructor arguments
   are missing from this extract. */
319 shared_ptr<PlayerVideo>
320 Player::black_player_video_frame (Eyes eyes) const
322 return shared_ptr<PlayerVideo> (
324 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
327 _video_container_size,
328 _video_container_size,
/* Any conversion will do for an all-black frame */
331 PresetColourConversion::all().front().conversion,
/* No originating content or frame index for synthesized black */
332 boost::weak_ptr<Content>(),
333 boost::optional<Frame>()
/* Convert a DCP timeline time to a video frame index within the given piece's
   content, clamping to the piece's trimmed length and accounting for trim-start. */
339 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
341 DCPTime s = t - piece->content->position ();
342 s = min (piece->content->length_after_trim(_film), s);
343 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
345 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
346 then convert that ContentTime to frames at the content's rate. However this fails for
347 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
348 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
350 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
352 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: convert a content video frame index to a
   DCP timeline time, undoing trim-start and adding the piece's position. */
356 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
358 /* See comment in dcp_to_content_video */
359 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
360 return d + piece->content->position();
/* Convert a DCP timeline time to a frame count at the film's audio rate,
   within the given piece, clamped to the piece's trimmed length. */
364 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
366 DCPTime s = t - piece->content->position ();
367 s = min (piece->content->length_after_trim(_film), s);
368 /* See notes in dcp_to_content_video */
369 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: audio frame index (at the film's audio
   rate) back to a DCP timeline time. */
373 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
375 /* See comment in dcp_to_content_video */
376 return DCPTime::from_frames (f, _film->audio_frame_rate())
377 - DCPTime (piece->content->trim_start(), piece->frc)
378 + piece->content->position();
/* Convert a DCP timeline time to a ContentTime within the piece, clamped to
   the trimmed length and offset by trim-start. */
382 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
384 DCPTime s = t - piece->content->position ();
385 s = min (piece->content->length_after_trim(_film), s);
386 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Convert a ContentTime within the piece to a DCP timeline time (clamped to
   be non-negative). */
390 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
392 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts of every text (subtitle/caption) content in all pieces.
   Duplicates are not removed; see the XXX below about duplicate font IDs. */
395 list<shared_ptr<Font> >
396 Player::get_subtitle_fonts ()
398 boost::mutex::scoped_lock lm (_mutex);
400 list<shared_ptr<Font> > fonts;
401 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
402 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
403 /* XXX: things may go wrong if there are duplicate font IDs
404 with different font files.
406 list<shared_ptr<Font> > f = j->fonts ();
407 copy (f.begin(), f.end(), back_inserter (fonts));
414 /** Set this player never to produce any video data */
416 Player::set_ignore_video ()
418 boost::mutex::scoped_lock lm (_mutex);
419 _ignore_video = true;
/* Pieces must be rebuilt so decoders pick up the new ignore flag */
420 setup_pieces_unlocked ();
/* Set this player never to produce any audio data; rebuilds pieces so the
   flag reaches the decoders. */
424 Player::set_ignore_audio ()
426 boost::mutex::scoped_lock lm (_mutex);
427 _ignore_audio = true;
428 setup_pieces_unlocked ();
/* Set this player never to produce any text (subtitle/caption) data.
   NOTE(review): the line setting _ignore_text is missing from this extract. */
432 Player::set_ignore_text ()
434 boost::mutex::scoped_lock lm (_mutex);
436 setup_pieces_unlocked ();
439 /** Set the player to always burn open texts into the image regardless of the content settings */
441 Player::set_always_burn_open_subtitles ()
443 boost::mutex::scoped_lock lm (_mutex);
/* No piece rebuild needed: this flag is consulted at render time */
444 _always_burn_open_subtitles = true;
447 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the signature line (presumably Player::set_fast) and the flag
   assignment are missing from this extract — confirm against the full source. */
451 boost::mutex::scoped_lock lm (_mutex);
453 setup_pieces_unlocked ();
/* Make the player decode and play content that a DCP merely references,
   instead of skipping it; rebuilds pieces so DCP decoders are reconfigured. */
457 Player::set_play_referenced ()
459 boost::mutex::scoped_lock lm (_mutex);
460 _play_referenced = true;
461 setup_pieces_unlocked ();
/* Apply per-reel trims to a reel asset (adjusting its entry point and duration
   in place) and, if anything remains, append it to `a' with its DCP period
   computed from `from' at the film frame rate `ffr'. */
465 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
467 DCPOMATIC_ASSERT (r);
/* Mutates the asset: move the entry point forward and shorten the duration */
468 r->set_entry_point (r->entry_point() + reel_trim_start);
469 r->set_duration (r->duration() - reel_trim_start - reel_trim_end);
/* Entirely-trimmed assets are dropped */
470 if (r->duration() > 0) {
472 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->duration(), ffr)))
/* Return the reel assets (picture/sound/subtitle/closed-caption) of any DCP
   content that is being referenced rather than re-encoded, with this film's
   trims applied per reel.  See doc/design/trim_reels.svg for the trim model. */
477 list<ReferencedReelAsset>
478 Player::get_reel_assets ()
480 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
482 list<ReferencedReelAsset> a;
484 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can reference assets; skip everything else */
485 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
490 scoped_ptr<DCPDecoder> decoder;
/* NOTE(review): this construction is presumably wrapped in a try/catch in the
   full source (lines are missing here) — confirm */
492 decoder.reset (new DCPDecoder (_film, j, false));
497 DCPOMATIC_ASSERT (j->video_frame_rate ());
498 double const cfr = j->video_frame_rate().get();
/* Trims expressed in frames at the content's rate */
499 Frame const trim_start = j->trim_start().frames_round (cfr);
500 Frame const trim_end = j->trim_end().frames_round (cfr);
501 int const ffr = _film->video_frame_rate ();
503 /* position in the asset from the start */
504 int64_t offset_from_start = 0;
505 /* position in the asset from the end */
506 int64_t offset_from_end = 0;
/* First pass: total duration, so offset_from_end can count down */
507 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
508 /* Assume that main picture duration is the length of the reel */
509 offset_from_end += k->main_picture()->duration();
512 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
514 /* Assume that main picture duration is the length of the reel */
515 int64_t const reel_duration = k->main_picture()->duration();
517 /* See doc/design/trim_reels.svg */
518 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
519 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
521 DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
522 if (j->reference_video ()) {
523 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
526 if (j->reference_audio ()) {
527 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
530 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
531 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
534 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
535 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
536 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
540 offset_from_start += reel_duration;
541 offset_from_end -= reel_duration;
/* One step of the playback engine (Player::pass — NOTE(review): the signature
   line itself is missing from this extract).  Picks whichever source is
   furthest behind — a piece's decoder, the black-video tracker or the
   silent-audio tracker — makes it emit some data, then pushes out any audio
   that is now known to be complete and flushes delayed video. */
551 boost::mutex::scoped_lock lm (_mutex);
/* Suspended while a playlist/content change is in flight */
554 /* We can't pass in this state */
558 if (_playlist->length(_film) == DCPTime()) {
559 /* Special case of an empty Film; just give one black frame */
560 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
564 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
566 shared_ptr<Piece> earliest_content;
567 optional<DCPTime> earliest_time;
569 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Decoder position is in content time; convert to DCP time for comparison */
574 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
575 if (t > i->content->end(_film)) {
579 /* Given two choices at the same time, pick the one with texts so we see it before
582 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
584 earliest_content = i;
/* NOTE(review): the `which' selection logic between here and the pass() call
   below is partly missing from this extract */
598 if (earliest_content) {
602 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
603 earliest_time = _black.position ();
607 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
608 earliest_time = _silent.position ();
615 earliest_content->done = earliest_content->decoder->pass ();
616 shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
617 if (dcp && !_play_referenced && dcp->reference_audio()) {
618 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
619 to `hide' the fact that no audio was emitted during the referenced DCP (though
620 we need to behave as though it was).
622 _last_audio_time = dcp->end (_film);
/* BLACK case: emit one black frame and advance */
627 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
628 _black.set_position (_black.position() + one_video_frame());
/* SILENT case: emit up to one frame's worth of silence */
632 DCPTimePeriod period (_silent.period_at_position());
633 if (_last_audio_time) {
634 /* Sometimes the thing that happened last finishes fractionally before
635 or after this silence. Bodge the start time of the silence to fix it.
636 I think this is nothing to worry about since we will just add or
637 remove a little silence at the end of some content.
639 int64_t const error = labs(period.from.get() - _last_audio_time->get());
640 /* Let's not worry about less than a frame at 24fps */
641 int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
642 if (error >= too_much_error) {
643 _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
/* Hard stop if the bodge would be larger than our tolerance */
645 DCPOMATIC_ASSERT (error < too_much_error);
646 period.from = *_last_audio_time;
648 if (period.duration() > one_video_frame()) {
649 period.to = period.from + one_video_frame();
652 _silent.set_position (period.to);
660 /* Emit any audio that is ready */
662 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
663 of our streams, or the position of the _silent.
665 DCPTime pull_to = _film->length ();
666 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
667 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
668 pull_to = i->second.last_push_end;
671 if (!_silent.done() && _silent.position() < pull_to) {
672 pull_to = _silent.position();
675 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
676 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
677 if (_last_audio_time && i->second < *_last_audio_time) {
678 /* This new data comes before the last we emitted (or the last seek); discard it */
679 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
684 } else if (_last_audio_time && i->second > *_last_audio_time) {
685 /* There's a gap between this data and the last we emitted; fill with silence */
686 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
689 emit_audio (i->first, i->second);
/* Flush the video delay queue (see emit_video for why it exists) */
694 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
695 do_emit_video(i->first, i->second);
702 /** @return Open subtitles for the frame at the given time, converted to images */
703 optional<PositionImage>
704 Player::open_subtitles_for_frame (DCPTime time) const
706 list<PositionImage> captions;
707 int const vfr = _film->video_frame_rate();
/* Gather active open subtitles overlapping this one-frame period (burnt-in,
   or all of them if _always_burn_open_subtitles is set) */
711 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
714 /* Bitmap subtitles */
715 BOOST_FOREACH (BitmapText i, j.bitmap) {
720 /* i.image will already have been scaled to fit _video_container_size */
721 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
/* Rectangle coordinates are proportions of the container; convert to pixels */
727 lrint (_video_container_size.width * i.rectangle.x),
728 lrint (_video_container_size.height * i.rectangle.y)
734 /* String subtitles (rendered to an image) */
735 if (!j.string.empty ()) {
736 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
737 copy (s.begin(), s.end(), back_inserter (captions));
741 if (captions.empty ()) {
742 return optional<PositionImage> ();
/* Composite all caption images into one */
745 return merge (captions);
/* Handler for video data arriving from a piece's decoder.  Converts the frame
   time to DCP time, discards frames before the content period or the last
   seek, fills any gap since the last emitted frame (with repeats of the last
   frame or with black, handling 3D eye sequencing), then builds a PlayerVideo
   and emits it (repeated as required by the frame-rate change). */
749 Player::video (weak_ptr<Piece> wp, ContentVideo video)
/* Piece may have been rebuilt since this signal was queued */
751 shared_ptr<Piece> piece = wp.lock ();
756 FrameRateChange frc (_film, piece->content);
/* Skip alternate frames when the content rate is ~2x the DCP rate */
757 if (frc.skip && (video.frame % 2) == 1) {
761 /* Time of the first frame we will emit */
762 DCPTime const time = content_video_to_dcp (piece, video.frame);
764 /* Discard if it's before the content's period or the last accurate seek. We can't discard
765 if it's after the content's period here as in that case we still need to fill any gap between
766 `now' and the end of the content's period.
768 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
772 /* Fill gaps that we discover now that we have some video which needs to be emitted.
773 This is where we need to fill to.
775 DCPTime fill_to = min (time, piece->content->end(_film));
777 if (_last_video_time) {
778 DCPTime fill_from = max (*_last_video_time, piece->content->position());
780 /* Fill if we have more than half a frame to do */
781 if ((fill_to - fill_from) > one_video_frame() / 2) {
782 LastVideoMap::const_iterator last = _last_video.find (wp);
783 if (_film->three_d()) {
/* In 3D we fill eye-by-eye, finishing just before the eye of the incoming frame */
784 Eyes fill_to_eyes = video.eyes;
785 if (fill_to_eyes == EYES_BOTH) {
786 fill_to_eyes = EYES_LEFT;
788 if (fill_to == piece->content->end(_film)) {
789 /* Don't fill after the end of the content */
790 fill_to_eyes = EYES_LEFT;
792 DCPTime j = fill_from;
793 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
794 if (eyes == EYES_BOTH) {
797 while (j < fill_to || eyes != fill_to_eyes) {
798 if (last != _last_video.end()) {
/* Repeat the last-seen frame, re-badged for the eye we are filling */
799 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
800 copy->set_eyes (eyes);
801 emit_video (copy, j);
803 emit_video (black_player_video_frame(eyes), j);
/* Time only advances after the right eye has been emitted */
805 if (eyes == EYES_RIGHT) {
806 j += one_video_frame();
808 eyes = increment_eyes (eyes);
/* 2D: simply repeat the last frame (or black) once per missing frame */
811 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
812 if (last != _last_video.end()) {
813 emit_video (last->second, j);
815 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this frame; kept in _last_video for gap-filling.
   NOTE(review): some constructor arguments are missing from this extract. */
822 _last_video[wp].reset (
825 piece->content->video->crop (),
826 piece->content->video->fade (_film, video.frame),
827 piece->content->video->scale().size (
828 piece->content->video, _video_container_size, _film->frame_size ()
830 _video_container_size,
833 piece->content->video->colour_conversion(),
/* Emit the frame, repeated when the DCP rate is a multiple of the content rate */
840 for (int i = 0; i < frc.repeat; ++i) {
841 if (t < piece->content->end(_film)) {
842 emit_video (_last_video[wp], t);
844 t += one_video_frame ();
/* Handler for audio data arriving from a piece's decoder.  Maps the block
   onto the DCP timeline, trims anything outside the content's period, applies
   gain, remaps channels, runs the optional audio processor, then pushes the
   result into the merger and records the stream's new last-push-end. */
849 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
851 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
853 shared_ptr<Piece> piece = wp.lock ();
858 shared_ptr<AudioContent> content = piece->content->audio;
859 DCPOMATIC_ASSERT (content);
861 int const rfr = content->resampled_frame_rate (_film);
863 /* Compute time in the DCP */
864 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
865 /* And the end of this block in the DCP */
866 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
868 /* Remove anything that comes before the start or after the end of the content */
869 if (time < piece->content->position()) {
870 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
872 /* This audio is entirely discarded */
875 content_audio.audio = cut.first;
/* NOTE(review): the line updating `time' from cut.second appears to be
   missing from this extract */
877 } else if (time > piece->content->end(_film)) {
/* Whole block is beyond the end of the content: drop it */
880 } else if (end > piece->content->end(_film)) {
/* Block straddles the end of the content: keep only the leading part */
881 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
882 if (remaining_frames == 0) {
885 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
886 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
887 content_audio.audio = cut;
890 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Gain (copy-on-write: apply to a copy so the decoder's buffer is untouched) */
894 if (content->gain() != 0) {
895 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
896 gain->apply_gain (content->gain ());
897 content_audio.audio = gain;
/* Remap the stream's channels to the film's channel layout */
902 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
906 if (_audio_processor) {
907 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push into the merger; pass() later pulls out whatever is complete */
912 _audio_merger.push (content_audio.audio, time);
913 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
914 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for the start of a bitmap subtitle: apply the content's offsets and
   scale to the subtitle rectangle, scale the image to the container size and
   register the text as active from its DCP start time. */
918 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
920 shared_ptr<Piece> piece = wp.lock ();
921 shared_ptr<const TextContent> text = wc.lock ();
922 if (!piece || !text) {
926 /* Apply content's subtitle offsets */
927 subtitle.sub.rectangle.x += text->x_offset ();
928 subtitle.sub.rectangle.y += text->y_offset ();
930 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
931 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
932 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
934 /* Apply content's subtitle scale */
935 subtitle.sub.rectangle.width *= text->x_scale ();
936 subtitle.sub.rectangle.height *= text->y_scale ();
939 shared_ptr<Image> image = subtitle.sub.image;
941 /* We will scale the subtitle up to fit _video_container_size */
942 int const width = subtitle.sub.rectangle.width * _video_container_size.width;
943 int const height = subtitle.sub.rectangle.height * _video_container_size.height;
/* Degenerate rectangle: nothing to show */
944 if (width == 0 || height == 0) {
948 dcp::Size scaled_size (width, height);
949 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
950 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
952 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for the start of a plain (string) subtitle: apply the content's
   offsets, scale and aspect adjustments to each SubtitleString, then register
   the text as active from its DCP start time. */
956 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
958 shared_ptr<Piece> piece = wp.lock ();
959 shared_ptr<const TextContent> text = wc.lock ();
960 if (!piece || !text) {
965 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore subtitles starting after the content has ended */
967 if (from > piece->content->end(_film)) {
971 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
972 s.set_h_position (s.h_position() + text->x_offset ());
973 s.set_v_position (s.v_position() + text->y_offset ());
974 float const xs = text->x_scale();
975 float const ys = text->y_scale();
976 float size = s.size();
978 /* Adjust size to express the common part of the scaling;
979 e.g. if xs = ys = 0.5 we scale size by 2.
/* Guard against division by (near-)zero scales */
981 if (xs > 1e-5 && ys > 1e-5) {
982 size *= 1 / min (1 / xs, 1 / ys);
986 /* Then express aspect ratio changes */
987 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
988 s.set_aspect_adjust (xs / ys);
/* dcp::Time here is (seconds, editable-rate); 1000 is the rate used throughout */
991 s.set_in (dcp::Time(from.seconds(), 1000));
992 ps.string.push_back (StringText (s, text->outline_width()));
993 ps.add_fonts (text->fonts ());
996 _active_texts[text->type()].add_from (wc, ps, from);
/* Handler for the end of a subtitle: close the active text at the given time
   and, for non-burnt subtitles, emit it through the Text signal. */
1000 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1002 shared_ptr<const TextContent> text = wc.lock ();
/* A stop without a matching start (e.g. after a seek) is ignored */
1007 if (!_active_texts[text->type()].have(wc)) {
1011 shared_ptr<Piece> piece = wp.lock ();
1016 DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ignore stops beyond the end of the content */
1018 if (dcp_to > piece->content->end(_film)) {
1022 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
/* Burnt-in subtitles are composited in do_emit_video instead of emitted here */
1024 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1025 if (text->use() && !always && !text->burn()) {
1026 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
/* Seek all decoders to `time'.  If `accurate' is true the caller wants exactly
   the frame at `time' onwards, so the last-emitted trackers are pinned to it;
   otherwise they are cleared and the next emitted data defines them.  Also
   resets the shuffler, merger, active texts and black/silent trackers. */
1031 Player::seek (DCPTime time, bool accurate)
1033 boost::mutex::scoped_lock lm (_mutex);
/* Suspended while a playlist/content change is in flight */
1036 /* We can't seek in this state */
1041 _shuffler->clear ();
1046 if (_audio_processor) {
1047 _audio_processor->flush ();
1050 _audio_merger.clear ();
1051 for (int i = 0; i < TEXT_COUNT; ++i) {
1052 _active_texts[i].clear ();
1055 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1056 if (time < i->content->position()) {
1057 /* Before; seek to the start of the content */
1058 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
1060 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1061 /* During; seek to position */
1062 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1065 /* After; this piece is done */
/* Accurate branch: the next frames emitted must be exactly at `time' */
1071 _last_video_time = time;
1072 _last_video_eyes = EYES_LEFT;
1073 _last_audio_time = time;
/* Inaccurate branch: forget the trackers; first data after the seek re-seeds them */
1075 _last_video_time = optional<DCPTime>();
1076 _last_video_eyes = optional<Eyes>();
1077 _last_audio_time = optional<DCPTime>();
1080 _black.set_position (time);
1081 _silent.set_position (time);
1083 _last_video.clear ();
/* Queue a video frame for emission, updating the last-video trackers.  Frames
   are held in a short delay queue so subtitles that arrive slightly later can
   still be composited before the frame leaves the player. */
1087 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1089 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1090 player before the video that requires them.
1092 _delay.push_back (make_pair (pv, time));
/* Advance the time tracker only once both eyes (or a 2D frame) are queued */
1094 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1095 _last_video_time = time + one_video_frame();
1097 _last_video_eyes = increment_eyes (pv->eyes());
/* Keep the queue at most three entries; emit the oldest once it is longer */
1099 if (_delay.size() < 3) {
1103 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1105 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: expire active texts that are now in the past,
   composite any open subtitles for this frame, then fire the Video signal
   (the signal emission itself is on a line missing from this extract). */
1109 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
/* Only clear past texts once per time step, i.e. when the frame is complete */
1111 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1112 for (int i = 0; i < TEXT_COUNT; ++i) {
1113 _active_texts[i].clear_before (time);
1117 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1119 pv->set_text (subtitles.get ());
/* Fire the Audio signal for a block of audio and advance _last_audio_time.
   Audio must be contiguous: a gap of 2 or more Time units from the previous
   emission trips the assert (after logging a warning at >1). */
1126 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1128 /* Log if the assert below is about to fail */
1129 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1130 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1133 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1134 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1135 Audio (data, time, _film->audio_frame_rate());
1136 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence covering `period', in blocks of at most half a second. */
1140 Player::fill_audio (DCPTimePeriod period)
/* Empty period: nothing to do */
1142 if (period.from == period.to) {
1146 DCPOMATIC_ASSERT (period.from < period.to);
1148 DCPTime t = period.from;
1149 while (t < period.to) {
1150 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1151 Frame const samples = block.frames_round(_film->audio_frame_rate());
/* NOTE(review): a `samples > 0' guard presumably wraps this in the full source */
1153 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1154 silence->make_silent ();
1155 emit_audio (silence, t);
/* Duration of one video frame at the film's video frame rate. */
1162 Player::one_video_frame () const
1164 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Discard the part of `audio' (which starts at `time') that lies before
   `discard_to'.  Returns the remaining buffer and its new start time, or a
   null buffer if everything was discarded. */
1167 pair<shared_ptr<AudioBuffers>, DCPTime>
1168 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1170 DCPTime const discard_time = discard_to - time;
1171 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1172 Frame remaining_frames = audio->frames() - discard_frames;
1173 if (remaining_frames <= 0) {
1174 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1176 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1177 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1178 return make_pair(cut, time + discard_time);
/* Set (or clear) the resolution reduction used when decoding DCP content.
   Same PENDING/CANCELLED/DONE protocol as set_video_container_size; pieces
   are rebuilt so DCP decoders pick up the new reduction. */
1182 Player::set_dcp_decode_reduction (optional<int> reduction)
1184 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1187 boost::mutex::scoped_lock lm (_mutex);
/* No-op: tell observers the pending change came to nothing */
1189 if (reduction == _dcp_decode_reduction) {
1191 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1195 _dcp_decode_reduction = reduction;
1196 setup_pieces_unlocked ();
1199 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Public overload: convert a ContentTime within the given Content to DCP time
   by finding the matching piece.  Returns none if the content is not (or no
   longer) in our piece list, e.g. mid-change. */
1203 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1205 boost::mutex::scoped_lock lm (_mutex);
1207 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1208 if (i->content == content) {
1209 return content_time_to_dcp (i, t);
1213 /* We couldn't find this content; perhaps things are being changed over */
1214 return optional<DCPTime>();