2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <dcp/reel_closed_caption_asset.h>
57 #include <boost/foreach.hpp>
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79 using namespace dcpomatic;
// Property identifiers emitted through the Change signal so observers can tell
// which aspect of the player's state changed.
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
// Construct a Player for the given film/playlist: wire up change signals,
// size the video container to the film, pick up the film's audio processor,
// and seek to the start.  (NOTE(review): this extract is missing lines; the
// initializer list and body are incomplete as shown.)
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 , _playlist (playlist)
91 , _ignore_video (false)
92 , _ignore_audio (false)
93 , _ignore_text (false)
94 , _always_burn_open_subtitles (false)
96 , _play_referenced (false)
97 , _audio_merger (_film->audio_frame_rate())
100 _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
101 /* The butler must hear about this first, so since we are proxying this through to the butler we must
// at_front ensures the playlist_change handler runs before other slots on the signal.
104 _playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
105 _playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
106 set_video_container_size (_film->frame_size ());
// Re-run the AUDIO_PROCESSOR handler to set up _audio_processor from the film.
108 film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);
// Accurate seek to time zero to put all decoders in a known state.
111 seek (DCPTime (), true);
// Public, locking wrapper around setup_pieces_unlocked().
120 Player::setup_pieces ()
122 boost::mutex::scoped_lock lm (_mutex);
123 setup_pieces_unlocked ();
// True if the piece has a decoder with a video component (used as the
// predicate for the _black Empty below).
127 have_video (shared_ptr<Piece> piece)
129 return piece->decoder && piece->decoder->video;
// True if the piece has a decoder with an audio component (predicate for _silent).
133 have_audio (shared_ptr<Piece> piece)
135 return piece->decoder && piece->decoder->audio;
// Rebuild the _pieces list from the playlist: create (or reuse) a decoder for
// each content item, hook decoder outputs up to the player's handlers, and
// reset per-pass state.  Caller must hold _mutex.
// (NOTE(review): this extract is missing lines; bodies/braces are incomplete.)
139 Player::setup_pieces_unlocked ()
// Keep the old pieces so existing decoders can be reused for unchanged content.
141 list<shared_ptr<Piece> > old_pieces = _pieces;
145 _shuffler = new Shuffler();
146 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
148 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
150 if (!i->paths_valid ()) {
154 if (_ignore_video && _ignore_audio && i->text.empty()) {
155 /* We're only interested in text and this content has none */
// Reuse the decoder from the previous setup for this content, if any.
159 shared_ptr<Decoder> old_decoder;
160 BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
161 if (j->content == i) {
162 old_decoder = j->decoder;
167 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, old_decoder);
168 FrameRateChange frc (_film, i);
171 /* Not something that we can decode; e.g. Atmos content */
// Propagate the player's ignore flags into the decoder components.
175 if (decoder->video && _ignore_video) {
176 decoder->video->set_ignore (true);
179 if (decoder->audio && _ignore_audio) {
180 decoder->audio->set_ignore (true);
184 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
185 i->set_ignore (true);
// DCP content gets extra configuration for playing referenced assets.
189 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
191 dcp->set_decode_referenced (_play_referenced);
192 if (_play_referenced) {
193 dcp->set_forced_reduction (_dcp_decode_reduction);
197 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
198 _pieces.push_back (piece);
200 if (decoder->video) {
201 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
202 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
203 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
205 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
209 if (decoder->audio) {
210 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
// Connect each text decoder's start/stop signals to the subtitle handlers.
213 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
215 while (j != decoder->text.end()) {
216 (*j)->BitmapStart.connect (
217 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
219 (*j)->PlainStart.connect (
220 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
223 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
// Track per-audio-stream state starting at each content's position.
230 _stream_states.clear ();
231 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
232 if (i->content->audio) {
233 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
234 _stream_states[j] = StreamState (i, i->content->position ());
// Gaps with no video get black; gaps with no audio get silence.
239 _black = Empty (_film, _pieces, bind(&have_video, _1));
240 _silent = Empty (_film, _pieces, bind(&have_audio, _1));
242 _last_video_time = DCPTime ();
243 _last_video_eyes = EYES_BOTH;
244 _last_audio_time = DCPTime ();
// React to a change in playlist content: suspend on PENDING, rebuild pieces on
// DONE, resume on CANCELLED; then forward the change to our own listeners.
248 Player::playlist_content_change (ChangeType type, int property, bool frequent)
250 if (type == CHANGE_TYPE_PENDING) {
251 boost::mutex::scoped_lock lm (_mutex);
252 /* The player content is probably about to change, so we can't carry on
253 until that has happened and we've rebuilt our pieces. Stop pass()
254 and seek() from working until then.
257 } else if (type == CHANGE_TYPE_DONE) {
258 /* A change in our content has gone through. Re-build our pieces. */
261 } else if (type == CHANGE_TYPE_CANCELLED) {
262 boost::mutex::scoped_lock lm (_mutex);
// Proxy the change notification on to the player's observers.
266 Change (type, property, frequent);
// Set the size that output video will be fitted into, rebuilding the cached
// black frame to match.  Emits PENDING/DONE (or CANCELLED if unchanged).
270 Player::set_video_container_size (dcp::Size s)
272 Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
275 boost::mutex::scoped_lock lm (_mutex);
// No-op if the size is unchanged; cancel the pending change we announced.
277 if (s == _video_container_size) {
279 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
283 _video_container_size = s;
285 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
286 _black_image->make_black ();
289 Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
// Playlist-level change: rebuild pieces on DONE and notify observers.
293 Player::playlist_change (ChangeType type)
295 if (type == CHANGE_TYPE_DONE) {
298 Change (type, PlayerProperty::PLAYLIST, false);
// React to Film property changes that would alter our output, translating them
// into the player's own Change notifications and updating internal state.
302 Player::film_change (ChangeType type, Film::Property p)
304 /* Here we should notice Film properties that affect our output, and
305 alert listeners that our output now would be different to how it was
306 last time we were run.
309 if (p == Film::CONTAINER) {
310 Change (type, PlayerProperty::FILM_CONTAINER, false);
311 } else if (p == Film::VIDEO_FRAME_RATE) {
312 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
313 so we need new pieces here.
315 if (type == CHANGE_TYPE_DONE) {
318 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
319 } else if (p == Film::AUDIO_PROCESSOR) {
// Clone the film's processor at our output sample rate.
320 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
321 boost::mutex::scoped_lock lm (_mutex);
322 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
324 } else if (p == Film::AUDIO_CHANNELS) {
// Channel count changed: pending merged audio is no longer valid.
325 if (type == CHANGE_TYPE_DONE) {
326 boost::mutex::scoped_lock lm (_mutex);
327 _audio_merger.clear ();
// Build a PlayerVideo of plain black (using the cached _black_image) at the
// container size, for filling gaps where there is no video content.
332 shared_ptr<PlayerVideo>
333 Player::black_player_video_frame (Eyes eyes) const
335 return shared_ptr<PlayerVideo> (
337 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
340 _video_container_size,
341 _video_container_size,
344 PresetColourConversion::all().front().conversion,
// No originating content or frame number for a synthetic black frame.
346 boost::weak_ptr<Content>(),
347 boost::optional<Frame>()
// Convert a DCP timeline time to a video frame index within the given piece's
// content, clamping to the trimmed extent of the content.
353 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
355 DCPTime s = t - piece->content->position ();
356 s = min (piece->content->length_after_trim(_film), s);
357 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
359 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
360 then convert that ContentTime to frames at the content's rate. However this fails for
361 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
362 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
364 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
366 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
// Inverse of dcp_to_content_video: map a content video frame index to a time
// on the DCP timeline.
370 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
372 /* See comment in dcp_to_content_video */
373 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
374 return d + piece->content->position();
// Convert a DCP timeline time to an audio frame count at the film's (resampled)
// audio rate, relative to the start of the piece's untrimmed content.
378 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
380 DCPTime s = t - piece->content->position ();
381 s = min (piece->content->length_after_trim(_film), s);
382 /* See notes in dcp_to_content_video */
383 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
// Inverse of dcp_to_resampled_audio: map a resampled audio frame count back to
// a DCP timeline time.
387 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
389 /* See comment in dcp_to_content_video */
390 return DCPTime::from_frames (f, _film->audio_frame_rate())
391 - DCPTime (piece->content->trim_start(), piece->frc)
392 + piece->content->position();
// Convert a DCP timeline time to a ContentTime within the piece, clamped to the
// trimmed extent and offset by trim_start.
396 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
398 DCPTime s = t - piece->content->position ();
399 s = min (piece->content->length_after_trim(_film), s);
400 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
// Inverse of dcp_to_content_time: map a ContentTime in the piece to a DCP
// timeline time, never returning a negative time.
404 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
406 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
// Collect the fonts used by every text content in every piece.
409 list<shared_ptr<Font> >
410 Player::get_subtitle_fonts ()
412 boost::mutex::scoped_lock lm (_mutex);
414 list<shared_ptr<Font> > fonts;
415 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
416 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
417 /* XXX: things may go wrong if there are duplicate font IDs
418 with different font files.
420 list<shared_ptr<Font> > f = j->fonts ();
421 copy (f.begin(), f.end(), back_inserter (fonts));
428 /** Set this player never to produce any video data */
430 Player::set_ignore_video ()
432 boost::mutex::scoped_lock lm (_mutex);
433 _ignore_video = true;
// Rebuild pieces so decoders pick up the new ignore flag.
434 setup_pieces_unlocked ();
// Set this player never to produce any audio data.
438 Player::set_ignore_audio ()
440 boost::mutex::scoped_lock lm (_mutex);
441 _ignore_audio = true;
442 setup_pieces_unlocked ();
// Set this player never to produce any text (subtitle/caption) data.
446 Player::set_ignore_text ()
448 boost::mutex::scoped_lock lm (_mutex);
450 setup_pieces_unlocked ();
453 /** Set the player to always burn open texts into the image regardless of the content settings */
455 Player::set_always_burn_open_subtitles ()
457 boost::mutex::scoped_lock lm (_mutex);
458 _always_burn_open_subtitles = true;
461 /** Sets up the player to be faster, possibly at the expense of quality */
// NOTE(review): the function signature is missing from this extract;
// presumably this sets _fast before rebuilding pieces — confirm against source.
465 boost::mutex::scoped_lock lm (_mutex);
467 setup_pieces_unlocked ();
// Make the player decode and play assets that DCP content merely references.
471 Player::set_play_referenced ()
473 boost::mutex::scoped_lock lm (_mutex);
474 _play_referenced = true;
475 setup_pieces_unlocked ();
// Apply reel-level trims to a reel asset and, if anything remains, append it
// to the list with its period on the DCP timeline.  ffr is the film frame rate.
479 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
481 DCPOMATIC_ASSERT (r);
482 r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
483 r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
// Only keep the asset if some duration survives the trims.
484 if (r->actual_duration() > 0) {
486 ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
// Gather the reel assets (picture/sound/subtitle/caption) that DCP content in
// the playlist asks to be referenced rather than re-encoded, with content
// trims mapped onto per-reel trims.
491 list<ReferencedReelAsset>
492 Player::get_reel_assets ()
494 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
496 list<ReferencedReelAsset> a;
498 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
// Only DCP content can reference existing assets.
499 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
504 scoped_ptr<DCPDecoder> decoder;
506 decoder.reset (new DCPDecoder (_film, j, false));
511 DCPOMATIC_ASSERT (j->video_frame_rate ());
512 double const cfr = j->video_frame_rate().get();
513 Frame const trim_start = j->trim_start().frames_round (cfr);
514 Frame const trim_end = j->trim_end().frames_round (cfr);
515 int const ffr = _film->video_frame_rate ();
517 /* position in the asset from the start */
518 int64_t offset_from_start = 0;
519 /* position in the asset from the end */
520 int64_t offset_from_end = 0;
// First pass: total duration, so offset_from_end can count down per reel.
521 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
522 /* Assume that main picture duration is the length of the reel */
523 offset_from_end += k->main_picture()->actual_duration();
526 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
528 /* Assume that main picture duration is the length of the reel */
529 int64_t const reel_duration = k->main_picture()->actual_duration();
531 /* See doc/design/trim_reels.svg */
532 Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
533 Frame const reel_trim_end = min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));
535 DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
536 if (j->reference_video ()) {
537 maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
540 if (j->reference_audio ()) {
541 maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
544 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
545 maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
548 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
549 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
550 maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
554 offset_from_start += reel_duration;
555 offset_from_end -= reel_duration;
// NOTE(review): the function signature is missing from this extract; from the
// body this appears to be Player::pass() — advance playback by asking the
// furthest-behind decoder (or black/silence filler) to emit some data, then
// push any ready audio and delayed video out — confirm against source.
565 boost::mutex::scoped_lock lm (_mutex);
568 /* We can't pass in this state */
572 if (_playlist->length(_film) == DCPTime()) {
573 /* Special case of an empty Film; just give one black frame */
574 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
578 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
580 shared_ptr<Piece> earliest_content;
581 optional<DCPTime> earliest_time;
583 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
588 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
589 if (t > i->content->end(_film)) {
593 /* Given two choices at the same time, pick the one with texts so we see it before
596 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
598 earliest_content = i;
612 if (earliest_content) {
// Black/silence fillers take over if they are further behind than any content.
616 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
617 earliest_time = _black.position ();
621 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
622 earliest_time = _silent.position ();
629 earliest_content->done = earliest_content->decoder->pass ();
630 shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
631 if (dcp && !_play_referenced && dcp->reference_audio()) {
632 /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
633 to `hide' the fact that no audio was emitted during the referenced DCP (though
634 we need to behave as though it was).
636 _last_audio_time = dcp->end (_film);
641 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
642 _black.set_position (_black.position() + one_video_frame());
646 DCPTimePeriod period (_silent.period_at_position());
647 if (_last_audio_time) {
648 /* Sometimes the thing that happened last finishes fractionally before
649 or after this silence. Bodge the start time of the silence to fix it.
651 DCPOMATIC_ASSERT (labs(period.from.get() - _last_audio_time->get()) < 2);
652 period.from = *_last_audio_time;
// Emit at most one video frame's worth of silence per pass.
654 if (period.duration() > one_video_frame()) {
655 period.to = period.from + one_video_frame();
658 _silent.set_position (period.to);
666 /* Emit any audio that is ready */
668 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
669 of our streams, or the position of the _silent.
671 DCPTime pull_to = _film->length ();
672 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
673 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
674 pull_to = i->second.last_push_end;
677 if (!_silent.done() && _silent.position() < pull_to) {
678 pull_to = _silent.position();
681 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
682 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
683 if (_last_audio_time && i->second < *_last_audio_time) {
684 /* This new data comes before the last we emitted (or the last seek); discard it */
685 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
690 } else if (_last_audio_time && i->second > *_last_audio_time) {
691 /* There's a gap between this data and the last we emitted; fill with silence */
692 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
695 emit_audio (i->first, i->second);
// Flush any video frames held in the subtitle-delay buffer.
700 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
701 do_emit_video(i->first, i->second);
708 /** @return Open subtitles for the frame at the given time, converted to images */
709 optional<PositionImage>
710 Player::open_subtitles_for_frame (DCPTime time) const
712 list<PositionImage> captions;
713 int const vfr = _film->video_frame_rate();
// Collect texts that should be burnt in during this one-frame period.
717 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
720 /* Bitmap subtitles */
721 BOOST_FOREACH (BitmapText i, j.bitmap) {
726 /* i.image will already have been scaled to fit _video_container_size */
727 dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
// Rectangle coordinates are proportional; convert to pixels in the container.
733 lrint (_video_container_size.width * i.rectangle.x),
734 lrint (_video_container_size.height * i.rectangle.y)
740 /* String subtitles (rendered to an image) */
741 if (!j.string.empty ()) {
742 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
743 copy (s.begin(), s.end(), back_inserter (captions));
747 if (captions.empty ()) {
748 return optional<PositionImage> ();
// Combine all caption images into a single positioned image.
751 return merge (captions);
// Handler for video data arriving from a piece's decoder: convert the frame's
// time to the DCP timeline, discard out-of-range frames, fill any gap since
// the last emitted video (with repeats of the last frame or black), then emit.
755 Player::video (weak_ptr<Piece> wp, ContentVideo video)
757 shared_ptr<Piece> piece = wp.lock ();
762 FrameRateChange frc (_film, piece->content);
// When skipping (content rate > DCP rate) drop every other frame.
763 if (frc.skip && (video.frame % 2) == 1) {
767 /* Time of the first frame we will emit */
768 DCPTime const time = content_video_to_dcp (piece, video.frame);
770 /* Discard if it's before the content's period or the last accurate seek. We can't discard
771 if it's after the content's period here as in that case we still need to fill any gap between
772 `now' and the end of the content's period.
774 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
778 /* Fill gaps that we discover now that we have some video which needs to be emitted.
779 This is where we need to fill to.
781 DCPTime fill_to = min (time, piece->content->end(_film));
783 if (_last_video_time) {
784 DCPTime fill_from = max (*_last_video_time, piece->content->position());
786 /* Fill if we have more than half a frame to do */
787 if ((fill_to - fill_from) > one_video_frame() / 2) {
788 LastVideoMap::const_iterator last = _last_video.find (wp);
// 3D filling must step through eyes (L then R) as well as time.
789 if (_film->three_d()) {
790 Eyes fill_to_eyes = video.eyes;
791 if (fill_to_eyes == EYES_BOTH) {
792 fill_to_eyes = EYES_LEFT;
794 if (fill_to == piece->content->end(_film)) {
795 /* Don't fill after the end of the content */
796 fill_to_eyes = EYES_LEFT;
798 DCPTime j = fill_from;
799 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
800 if (eyes == EYES_BOTH) {
803 while (j < fill_to || eyes != fill_to_eyes) {
// Prefer repeating the last frame we saw from this piece; otherwise black.
804 if (last != _last_video.end()) {
805 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
806 copy->set_eyes (eyes);
807 emit_video (copy, j);
809 emit_video (black_player_video_frame(eyes), j);
811 if (eyes == EYES_RIGHT) {
812 j += one_video_frame();
814 eyes = increment_eyes (eyes);
// 2D filling: one frame at a time.
817 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
818 if (last != _last_video.end()) {
819 emit_video (last->second, j);
821 emit_video (black_player_video_frame(EYES_BOTH), j);
// Build the PlayerVideo for this frame, caching it for future gap-filling.
828 _last_video[wp].reset (
831 piece->content->video->crop (),
832 piece->content->video->fade (_film, video.frame),
833 piece->content->video->scale().size (
834 piece->content->video, _video_container_size, _film->frame_size ()
836 _video_container_size,
839 piece->content->video->colour_conversion(),
840 piece->content->video->range(),
// Emit the frame, repeated as required by the frame-rate change, but never
// beyond the end of the content.
847 for (int i = 0; i < frc.repeat; ++i) {
848 if (t < piece->content->end(_film)) {
849 emit_video (_last_video[wp], t);
851 t += one_video_frame ();
// Handler for audio data arriving from a piece's decoder: trim to the content's
// period, apply gain/remap/processing, then push into the merger.
856 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
858 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
860 shared_ptr<Piece> piece = wp.lock ();
865 shared_ptr<AudioContent> content = piece->content->audio;
866 DCPOMATIC_ASSERT (content);
868 int const rfr = content->resampled_frame_rate (_film);
870 /* Compute time in the DCP */
871 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
872 /* And the end of this block in the DCP */
873 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
875 /* Remove anything that comes before the start or after the end of the content */
876 if (time < piece->content->position()) {
877 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
879 /* This audio is entirely discarded */
882 content_audio.audio = cut.first;
884 } else if (time > piece->content->end(_film)) {
// Entirely after the content's end; drop it.
887 } else if (end > piece->content->end(_film)) {
// Straddles the end: keep only the frames up to the content's end.
888 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
889 if (remaining_frames == 0) {
892 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
893 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
894 content_audio.audio = cut;
897 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
// Apply the content's gain setting, on a copy so the decoder's data is untouched.
901 if (content->gain() != 0) {
902 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
903 gain->apply_gain (content->gain ());
904 content_audio.audio = gain;
// Remap the stream's channels into the film's channel layout.
909 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
913 if (_audio_processor) {
914 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
// Queue for merging and record how far this stream has been pushed.
919 _audio_merger.push (content_audio.audio, time);
920 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
921 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
// Handler for a bitmap subtitle starting: apply the content's offset/scale
// settings, scale the image to the container, and register it as active.
925 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
927 shared_ptr<Piece> piece = wp.lock ();
928 shared_ptr<const TextContent> text = wc.lock ();
929 if (!piece || !text) {
933 /* Apply content's subtitle offsets */
934 subtitle.sub.rectangle.x += text->x_offset ();
935 subtitle.sub.rectangle.y += text->y_offset ();
937 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
938 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
939 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
941 /* Apply content's subtitle scale */
942 subtitle.sub.rectangle.width *= text->x_scale ();
943 subtitle.sub.rectangle.height *= text->y_scale ();
946 shared_ptr<Image> image = subtitle.sub.image;
947 /* We will scale the subtitle up to fit _video_container_size */
948 dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
949 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
950 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
952 _active_texts[text->type()].add_from (wc, ps, from);
// Handler for a string subtitle starting: apply the content's position/scale
// settings to each SubtitleString and register the result as active.
956 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
958 shared_ptr<Piece> piece = wp.lock ();
959 shared_ptr<const TextContent> text = wc.lock ();
960 if (!piece || !text) {
965 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
// Ignore subtitles that start after the content's end on the timeline.
967 if (from > piece->content->end(_film)) {
971 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
972 s.set_h_position (s.h_position() + text->x_offset ());
973 s.set_v_position (s.v_position() + text->y_offset ());
974 float const xs = text->x_scale();
975 float const ys = text->y_scale();
976 float size = s.size();
978 /* Adjust size to express the common part of the scaling;
979 e.g. if xs = ys = 0.5 we scale size by 2.
981 if (xs > 1e-5 && ys > 1e-5) {
982 size *= 1 / min (1 / xs, 1 / ys);
986 /* Then express aspect ratio changes */
987 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
988 s.set_aspect_adjust (xs / ys);
// dcp::Time here is expressed with a 1000-per-second editable-rate.
991 s.set_in (dcp::Time(from.seconds(), 1000));
992 ps.string.push_back (StringText (s, text->outline_width()));
993 ps.add_fonts (text->fonts ());
996 _active_texts[text->type()].add_from (wc, ps, from);
// Handler for a subtitle finishing: close off the active text and, if it is
// not to be burnt into the image, emit it via the Text signal.
1000 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1002 shared_ptr<const TextContent> text = wc.lock ();
1007 if (!_active_texts[text->type()].have(wc)) {
1011 shared_ptr<Piece> piece = wp.lock ();
1016 DCPTime const dcp_to = content_time_to_dcp (piece, to);
1018 if (dcp_to > piece->content->end(_film)) {
1022 pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
// Burnt-in texts are handled at frame render time, not emitted here.
1024 bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1025 if (text->use() && !always && !text->burn()) {
1026 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
// Seek every piece to the given DCP time, clearing all buffered state.  If
// `accurate' is true decoders must seek exactly; otherwise a nearby earlier
// point (e.g. a keyframe) is acceptable.
1031 Player::seek (DCPTime time, bool accurate)
1033 boost::mutex::scoped_lock lm (_mutex);
1036 /* We can't seek in this state */
1041 _shuffler->clear ();
1046 if (_audio_processor) {
1047 _audio_processor->flush ();
1050 _audio_merger.clear ();
1051 for (int i = 0; i < TEXT_COUNT; ++i) {
1052 _active_texts[i].clear ();
1055 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1056 if (time < i->content->position()) {
1057 /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
1058 we must seek this (following) content accurately, otherwise when we come to the end of the current
1059 content we may not start right at the beginning of the next, causing a gap (if the next content has
1060 been trimmed to a point between keyframes, or something).
1062 i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
1064 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1065 /* During; seek to position */
1066 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1069 /* After; this piece is done */
// For an accurate seek we know exactly where the next video/audio must land.
1075 _last_video_time = time;
1076 _last_video_eyes = EYES_LEFT;
1077 _last_audio_time = time;
// Inaccurate seek: positions are unknown until data arrives.
1079 _last_video_time = optional<DCPTime>();
1080 _last_video_eyes = optional<Eyes>();
1081 _last_audio_time = optional<DCPTime>();
1084 _black.set_position (time);
1085 _silent.set_position (time);
// Cached last-video frames are stale after any seek.
1087 _last_video.clear ();
// Queue a video frame for emission, delaying by a few frames so subtitles have
// time to arrive, and update the last-video bookkeeping.
1091 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1093 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1094 player before the video that requires them.
1096 _delay.push_back (make_pair (pv, time));
// Only advance _last_video_time once both eyes (or a 2D frame) are done.
1098 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1099 _last_video_time = time + one_video_frame();
1101 _last_video_eyes = increment_eyes (pv->eyes());
// Hold up to three frames before actually emitting.
1103 if (_delay.size() < 3) {
1107 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1109 do_emit_video (to_do.first, to_do.second);
// Actually emit a video frame: expire finished texts, attach any open
// subtitles to the frame, and (presumably) fire the Video signal —
// NOTE(review): emission line not visible in this extract.
1113 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1115 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1116 for (int i = 0; i < TEXT_COUNT; ++i) {
1117 _active_texts[i].clear_before (time);
1121 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1123 pv->set_text (subtitles.get ());
// Emit a block of audio via the Audio signal; audio must be contiguous with
// the previous emission (to within one sample).
1130 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1132 /* Log if the assert below is about to fail */
1133 if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1134 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1137 /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1138 DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1139 Audio (data, time, _film->audio_frame_rate());
1140 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
// Emit silence covering the given period, in blocks of at most half a second.
1144 Player::fill_audio (DCPTimePeriod period)
1146 if (period.from == period.to) {
1150 DCPOMATIC_ASSERT (period.from < period.to);
1152 DCPTime t = period.from;
1153 while (t < period.to) {
1154 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1155 Frame const samples = block.frames_round(_film->audio_frame_rate());
1157 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1158 silence->make_silent ();
1159 emit_audio (silence, t);
// Duration of a single video frame at the film's video frame rate.
1166 Player::one_video_frame () const
1168 return DCPTime::from_frames (1, _film->video_frame_rate ());
// Drop the part of `audio' (starting at `time') that falls before `discard_to';
// returns the remaining buffer and its new start time, or a null buffer if
// everything was discarded.
1171 pair<shared_ptr<AudioBuffers>, DCPTime>
1172 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1174 DCPTime const discard_time = discard_to - time;
1175 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1176 Frame remaining_frames = audio->frames() - discard_frames;
1177 if (remaining_frames <= 0) {
1178 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1180 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1181 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1182 return make_pair(cut, time + discard_time);
// Set (or clear) the resolution reduction used when decoding DCP content,
// rebuilding pieces if it actually changed.  Emits PENDING/DONE or CANCELLED.
1186 Player::set_dcp_decode_reduction (optional<int> reduction)
1188 Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1191 boost::mutex::scoped_lock lm (_mutex);
1193 if (reduction == _dcp_decode_reduction) {
1195 Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1199 _dcp_decode_reduction = reduction;
1200 setup_pieces_unlocked ();
1203 Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1207 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1209 boost::mutex::scoped_lock lm (_mutex);
1211 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1212 if (i->content == content) {
1213 return content_time_to_dcp (i, t);
1217 /* We couldn't find this content; perhaps things are being changed over */
1218 return optional<DCPTime>();