2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "caption_content.h"
44 #include "caption_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
/* Log a String::compose-formatted message to the Film's log as a GENERAL entry. */
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
/* Identifiers passed with the Player's Changed signal so observers can tell
   which aspect of the player's output has changed. */
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Construct a Player for a given Film/Playlist pair.  Wires this object to the
   film and playlist change signals, sets the output container size from the
   film, primes the audio-processor state and seeks to time zero.
   NOTE(review): this view is line-sampled — parts of the initializer list and
   body are elided; code below is kept byte-identical. */
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 , _playlist (playlist)
90 , _have_valid_pieces (false)
91 , _ignore_video (false)
92 , _ignore_caption (false)
94 , _play_referenced (false)
95 , _audio_merger (_film->audio_frame_rate())
/* React to changes in the film, the playlist, and individual content items */
98 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
99 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
100 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
101 set_video_container_size (_film->frame_size ());
/* Pick up any audio processor configured on the film (see film_changed) */
103 film_changed (Film::AUDIO_PROCESSOR);
/* Start decoding accurately from the beginning */
105 seek (DCPTime (), true);
/* (Re)build the list of Pieces (content + decoder + frame-rate-change) from the
   playlist, connect every decoder's output signals to this Player's handlers,
   rebuild per-audio-stream state, reset the black/silence fillers and position
   bookkeeping, and mark the pieces valid.
   NOTE(review): line-sampled view — braces and some statements are elided;
   code below is kept byte-identical. */
114 Player::setup_pieces ()
/* A Shuffler re-orders 3D L/R frames that may arrive out of sequence */
119 _shuffler = new Shuffler();
120 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
122 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files are missing/invalid */
124 if (!i->paths_valid ()) {
128 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
129 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
132 /* Not something that we can decode; e.g. Atmos content */
/* Honour the ignore-video / ignore-caption switches */
136 if (decoder->video && _ignore_video) {
137 decoder->video->set_ignore (true);
140 if (_ignore_caption) {
141 BOOST_FOREACH (shared_ptr<CaptionDecoder> i, decoder->caption) {
142 i->set_ignore (true);
/* Extra configuration for DCP content (referenced-reel handling) */
146 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
148 dcp->set_decode_referenced (_play_referenced);
149 if (_play_referenced) {
150 dcp->set_forced_reduction (_dcp_decode_reduction);
154 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
155 _pieces.push_back (piece);
/* Route decoder outputs into this Player, via the Shuffler for 3D L/R video */
157 if (decoder->video) {
158 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
159 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
160 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
162 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
166 if (decoder->audio) {
167 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Connect caption start/stop signals for every caption decoder */
170 list<shared_ptr<CaptionDecoder> >::const_iterator j = decoder->caption.begin();
172 while (j != decoder->caption.end()) {
173 (*j)->BitmapStart.connect (
174 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const CaptionContent>((*j)->content()), _1)
176 (*j)->PlainStart.connect (
177 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const CaptionContent>((*j)->content()), _1)
180 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const CaptionContent>((*j)->content()), _1, _2)
/* Track each audio stream's piece and last-pushed position */
187 _stream_states.clear ();
188 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
189 if (i->content->audio) {
190 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
191 _stream_states[j] = StreamState (i, i->content->position ());
/* Gaps in the timeline are filled with black video / silent audio */
196 _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
197 _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
199 _last_video_time = DCPTime ();
200 _last_video_eyes = EYES_BOTH;
201 _last_audio_time = DCPTime ();
202 _have_valid_pieces = true;
/* Handle a change to a property of some content in the playlist.  Properties in
   the first group invalidate the built pieces (decoders must be recreated);
   properties in the second group only require listeners to be told that output
   has changed.  In both cases the Changed signal is re-emitted.
   NOTE(review): line-sampled view — the surrounding if/else structure is
   partially elided; code below is kept byte-identical. */
206 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
208 shared_ptr<Content> c = w.lock ();
/* Properties that require the pieces to be rebuilt */
214 property == ContentProperty::POSITION ||
215 property == ContentProperty::LENGTH ||
216 property == ContentProperty::TRIM_START ||
217 property == ContentProperty::TRIM_END ||
218 property == ContentProperty::PATH ||
219 property == VideoContentProperty::FRAME_TYPE ||
220 property == VideoContentProperty::COLOUR_CONVERSION ||
221 property == AudioContentProperty::STREAMS ||
222 property == DCPContentProperty::NEEDS_ASSETS ||
223 property == DCPContentProperty::NEEDS_KDM ||
224 property == CaptionContentProperty::COLOUR ||
225 property == CaptionContentProperty::EFFECT ||
226 property == CaptionContentProperty::EFFECT_COLOUR ||
227 property == FFmpegContentProperty::SUBTITLE_STREAM ||
228 property == FFmpegContentProperty::FILTERS
231 _have_valid_pieces = false;
232 Changed (property, frequent);
/* Properties that only change the output; pieces stay valid */
235 property == CaptionContentProperty::LINE_SPACING ||
236 property == CaptionContentProperty::OUTLINE_WIDTH ||
237 property == CaptionContentProperty::Y_SCALE ||
238 property == CaptionContentProperty::FADE_IN ||
239 property == CaptionContentProperty::FADE_OUT ||
240 property == ContentProperty::VIDEO_FRAME_RATE ||
241 property == CaptionContentProperty::USE ||
242 property == CaptionContentProperty::X_OFFSET ||
243 property == CaptionContentProperty::Y_OFFSET ||
244 property == CaptionContentProperty::X_SCALE ||
245 property == CaptionContentProperty::FONTS ||
246 property == CaptionContentProperty::TYPE ||
247 property == VideoContentProperty::CROP ||
248 property == VideoContentProperty::SCALE ||
249 property == VideoContentProperty::FADE_IN ||
250 property == VideoContentProperty::FADE_OUT
253 Changed (property, frequent);
/* Change the size of the frame this Player emits.  No-op when unchanged;
   otherwise rebuild the cached black frame at the new size and signal
   listeners.  NOTE(review): line-sampled view; code kept byte-identical. */
258 Player::set_video_container_size (dcp::Size s)
260 if (s == _video_container_size) {
264 _video_container_size = s;
266 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
267 _black_image->make_black ();
269 Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Playlist structure changed: pieces must be rebuilt; tell listeners. */
273 Player::playlist_changed ()
275 _have_valid_pieces = false;
276 Changed (PlayerProperty::PLAYLIST, false);
/* React to a change of a Film property that affects this Player's output.
   NOTE(review): line-sampled view; code kept byte-identical. */
280 Player::film_changed (Film::Property p)
282 /* Here we should notice Film properties that affect our output, and
283 alert listeners that our output now would be different to how it was
284 last time we were run.
287 if (p == Film::CONTAINER) {
288 Changed (PlayerProperty::FILM_CONTAINER, false);
289 } else if (p == Film::VIDEO_FRAME_RATE) {
290 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
291 so we need new pieces here.
293 _have_valid_pieces = false;
294 Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
295 } else if (p == Film::AUDIO_PROCESSOR) {
/* Clone the film's audio processor (if any) at our output sample rate */
296 if (_film->audio_processor ()) {
297 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
299 } else if (p == Film::AUDIO_CHANNELS) {
300 _audio_merger.clear ();
/* Convert a list of bitmap captions (fractional rectangles over the frame) into
   PositionImages scaled and positioned for _video_container_size.
   NOTE(review): most of this function's body (the image scaling call and the
   return) is elided in this line-sampled view; code kept byte-identical. */
305 Player::transform_bitmap_captions (list<BitmapCaption> subs) const
307 list<PositionImage> all;
309 for (list<BitmapCaption>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
314 /* We will scale the subtitle up to fit _video_container_size */
315 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
321 dcp::YUV_TO_RGB_REC601,
322 i->image->pixel_format (),
/* Position: rectangle x/y are fractions of the container size */
327 lrint (_video_container_size.width * i->rectangle.x),
328 lrint (_video_container_size.height * i->rectangle.y)
/* Make a PlayerVideo wrapping the cached black image, sized to the container,
   for the given eyes (used to fill video gaps).
   NOTE(review): some constructor arguments are elided in this line-sampled
   view; code kept byte-identical. */
337 shared_ptr<PlayerVideo>
338 Player::black_player_video_frame (Eyes eyes) const
340 return shared_ptr<PlayerVideo> (
342 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
345 _video_container_size,
346 _video_container_size,
349 PresetColourConversion::all().front().conversion,
350 boost::weak_ptr<Content>(),
351 boost::optional<Frame>()
/* --- Time conversion helpers between the DCP timeline and a piece's content ---
   Each accounts for the piece's position, trims and FrameRateChange (frc).
   NOTE(review): line-sampled view — return types / braces are elided; code
   kept byte-identical. */

/* DCP time -> content video frame index within the piece. */
357 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
359 DCPTime s = t - piece->content->position ();
360 s = min (piece->content->length_after_trim(), s);
361 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
363 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
364 then convert that ContentTime to frames at the content's rate. However this fails for
365 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
366 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
368 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
370 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();

/* Content video frame index -> DCP time (inverse of the above). */
374 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
376 /* See comment in dcp_to_content_video */
377 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
378 return d + piece->content->position();

/* DCP time -> frame count at the film's audio sample rate. */
382 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
384 DCPTime s = t - piece->content->position ();
385 s = min (piece->content->length_after_trim(), s);
386 /* See notes in dcp_to_content_video */
387 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());

/* Resampled audio frame index -> DCP time (inverse of the above). */
391 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
393 /* See comment in dcp_to_content_video */
394 return DCPTime::from_frames (f, _film->audio_frame_rate())
395 - DCPTime (piece->content->trim_start(), piece->frc)
396 + piece->content->position();

/* DCP time -> ContentTime within the piece (clamped to the trimmed length). */
400 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
402 DCPTime s = t - piece->content->position ();
403 s = min (piece->content->length_after_trim(), s);
404 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());

/* ContentTime within the piece -> DCP time (clamped to >= 0). */
408 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
410 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by every caption content in the playlist (building
   pieces first if needed).  NOTE(review): line-sampled view — the setup_pieces
   call and return are elided; code kept byte-identical. */
413 list<shared_ptr<Font> >
414 Player::get_subtitle_fonts ()
416 if (!_have_valid_pieces) {
420 list<shared_ptr<Font> > fonts;
421 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
422 BOOST_FOREACH (shared_ptr<CaptionContent> j, i->content->caption) {
423 /* XXX: things may go wrong if there are duplicate font IDs
424 with different font files.
426 list<shared_ptr<Font> > f = j->fonts ();
427 copy (f.begin(), f.end(), back_inserter (fonts));
434 /** Set this player never to produce any video data */
436 Player::set_ignore_video ()
438 _ignore_video = true;

/** Set this player never to produce any caption data */
442 Player::set_ignore_caption ()
444 _ignore_caption = true;

447 /** Set a type of caption that this player should always burn into the image,
448 * regardless of the content settings.
449 * @param type type of captions to burn.
452 Player::set_always_burn_captions (CaptionType type)
454 _always_burn_captions = type;

457 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the signature of this function (presumably Player::set_fast)
   is elided in this line-sampled view. */
462 _have_valid_pieces = false;

/** Play referenced (rather than skipped) DCP reels; invalidates the pieces. */
466 Player::set_play_referenced ()
468 _play_referenced = true;
469 _have_valid_pieces = false;
/* Build the list of reel assets (picture/sound/open and closed captions) that
   are referenced from DCP content rather than re-encoded.  For each referenced
   asset the entry point and duration are adjusted for the content's trims and
   the asset's DCP timeline period is computed from the content position plus
   the running reel offset.
   NOTE(review): line-sampled view — loop braces, the `a.push_back(...)` calls
   around the ReferencedReelAsset constructions, and the return are elided;
   code kept byte-identical. */
472 list<ReferencedReelAsset>
473 Player::get_reel_assets ()
475 list<ReferencedReelAsset> a;
477 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can reference reel assets */
478 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
483 scoped_ptr<DCPDecoder> decoder;
485 decoder.reset (new DCPDecoder (j, _film->log(), false));
491 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
/* Trims measured in frames at the content (DCP) frame rate */
493 DCPOMATIC_ASSERT (j->video_frame_rate ());
494 double const cfr = j->video_frame_rate().get();
495 Frame const trim_start = j->trim_start().frames_round (cfr);
496 Frame const trim_end = j->trim_end().frames_round (cfr);
497 int const ffr = _film->video_frame_rate ();
/* Where this reel starts on the film's DCP timeline */
499 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
500 if (j->reference_video ()) {
501 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
502 DCPOMATIC_ASSERT (ra);
503 ra->set_entry_point (ra->entry_point() + trim_start);
504 ra->set_duration (ra->duration() - trim_start - trim_end);
506 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
510 if (j->reference_audio ()) {
511 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
512 DCPOMATIC_ASSERT (ra);
513 ra->set_entry_point (ra->entry_point() + trim_start);
514 ra->set_duration (ra->duration() - trim_start - trim_end);
516 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
520 if (j->reference_caption (CAPTION_OPEN)) {
521 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
522 DCPOMATIC_ASSERT (ra);
523 ra->set_entry_point (ra->entry_point() + trim_start);
524 ra->set_duration (ra->duration() - trim_start - trim_end);
526 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
530 if (j->reference_caption (CAPTION_CLOSED)) {
531 shared_ptr<dcp::ReelAsset> ra = k->closed_caption ();
532 DCPOMATIC_ASSERT (ra);
533 ra->set_entry_point (ra->entry_point() + trim_start);
534 ra->set_duration (ra->duration() - trim_start - trim_end);
536 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
540 /* Assume that main picture duration is the length of the reel */
541 offset += k->main_picture()->duration ();
/* NOTE(review): the signature of the enclosing function (presumably
   Player::pass) is elided by this line-sampled view.  The body below drives
   one step of playback: pick the decoder (or black/silence filler) that is
   farthest behind, make it emit data, then flush ready audio from the merger
   and any delayed video.  Code kept byte-identical. */
551 if (!_have_valid_pieces) {
555 if (_playlist->length() == DCPTime()) {
556 /* Special case of an empty Film; just give one black frame */
557 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
561 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
563 shared_ptr<Piece> earliest_content;
564 optional<DCPTime> earliest_time;
566 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
571 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
/* Decoder has run past the end of its content */
572 if (t > i->content->end()) {
576 /* Given two choices at the same time, pick the one with captions so we see it before
579 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->caption.empty())) {
581 earliest_content = i;
/* Consider the black/silence fillers as candidates too */
595 if (earliest_content) {
599 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
600 earliest_time = _black.position ();
604 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
605 earliest_time = _silent.position ();
/* Act on whichever source was chosen */
611 earliest_content->done = earliest_content->decoder->pass ();
614 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
615 _black.set_position (_black.position() + one_video_frame());
619 DCPTimePeriod period (_silent.period_at_position());
620 if (_last_audio_time) {
621 /* Sometimes the thing that happened last finishes fractionally before
622 this silence. Bodge the start time of the silence to fix it. I'm
623 not sure if this is the right solution --- maybe the last thing should
624 be padded `forward' rather than this thing padding `back'.
626 period.from = min(period.from, *_last_audio_time);
/* Emit at most one video frame's worth of silence per pass */
628 if (period.duration() > one_video_frame()) {
629 period.to = period.from + one_video_frame();
632 _silent.set_position (period.to);
640 /* Emit any audio that is ready */
642 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
643 of our streams, or the position of the _silent.
645 DCPTime pull_to = _film->length ();
646 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
647 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
648 pull_to = i->second.last_push_end;
651 if (!_silent.done() && _silent.position() < pull_to) {
652 pull_to = _silent.position();
655 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
656 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
657 if (_last_audio_time && i->second < *_last_audio_time) {
658 /* This new data comes before the last we emitted (or the last seek); discard it */
659 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
664 } else if (_last_audio_time && i->second > *_last_audio_time) {
665 /* There's a gap between this data and the last we emitted; fill with silence */
666 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
669 emit_audio (i->first, i->second);
/* Flush the video-delay queue */
674 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
675 do_emit_video(i->first, i->second);
/* Gather all captions (bitmap + rendered text) that should be burnt into the
   frame at `time`, merged into a single PositionImage, or none.
   NOTE(review): line-sampled view; code kept byte-identical. */
682 optional<PositionImage>
683 Player::captions_for_frame (DCPTime time) const
685 list<PositionImage> captions;
687 int const vfr = _film->video_frame_rate();
/* One pass per caption type (open/closed) */
689 for (int i = 0; i < CAPTION_COUNT; ++i) {
690 bool const always = _always_burn_captions && *_always_burn_captions == i;
693 _active_captions[i].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), always)
696 /* Image subtitles */
697 list<PositionImage> c = transform_bitmap_captions (j.image);
698 copy (c.begin(), c.end(), back_inserter (captions));
700 /* Text subtitles (rendered to an image) */
701 if (!j.text.empty ()) {
702 list<PositionImage> s = render_text (j.text, j.fonts, _video_container_size, time, vfr);
703 copy (s.begin(), s.end(), back_inserter (captions));
708 if (captions.empty ()) {
709 return optional<PositionImage> ();
712 return merge (captions);
/* Receive one decoded video frame from a piece.  Discards early/duplicate
   frames, fills any gap since the last emitted frame (repeating the previous
   frame or emitting black, with 3D eye handling), builds the PlayerVideo for
   this frame and emits it (repeated if the FrameRateChange requires).
   NOTE(review): line-sampled view — several statements (e.g. the PlayerVideo
   constructor arguments and the declaration of `t`) are elided; code kept
   byte-identical. */
716 Player::video (weak_ptr<Piece> wp, ContentVideo video)
718 shared_ptr<Piece> piece = wp.lock ();
723 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
/* Drop alternate frames when the rate change requires a skip */
724 if (frc.skip && (video.frame % 2) == 1) {
728 /* Time of the first frame we will emit */
729 DCPTime const time = content_video_to_dcp (piece, video.frame);
731 /* Discard if it's before the content's period or the last accurate seek. We can't discard
732 if it's after the content's period here as in that case we still need to fill any gap between
733 `now' and the end of the content's period.
735 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
739 /* Fill gaps that we discover now that we have some video which needs to be emitted.
740 This is where we need to fill to.
742 DCPTime fill_to = min (time, piece->content->end());
744 if (_last_video_time) {
745 DCPTime fill_from = max (*_last_video_time, piece->content->position());
746 LastVideoMap::const_iterator last = _last_video.find (wp);
747 if (_film->three_d()) {
/* 3D: fill eye-by-eye so L/R alternation stays consistent */
748 DCPTime j = fill_from;
749 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
750 if (eyes == EYES_BOTH) {
753 while (j < fill_to || eyes != video.eyes) {
754 if (last != _last_video.end()) {
755 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
756 copy->set_eyes (eyes);
757 emit_video (copy, j);
759 emit_video (black_player_video_frame(eyes), j);
761 if (eyes == EYES_RIGHT) {
762 j += one_video_frame();
764 eyes = increment_eyes (eyes);
767 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
768 if (last != _last_video.end()) {
769 emit_video (last->second, j);
771 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this frame and remember it per-piece */
777 _last_video[wp].reset (
780 piece->content->video->crop (),
781 piece->content->video->fade (video.frame),
782 piece->content->video->scale().size (
783 piece->content->video, _video_container_size, _film->frame_size ()
785 _video_container_size,
788 piece->content->video->colour_conversion(),
/* Emit the frame, repeated as the FrameRateChange requires */
795 for (int i = 0; i < frc.repeat; ++i) {
796 if (t < piece->content->end()) {
797 emit_video (_last_video[wp], t);
799 t += one_video_frame ();
/* Receive one block of decoded audio from a piece's stream.  Trims the block
   against the content's DCP period, applies gain, remaps to the film's channel
   layout, runs the optional audio processor, then pushes into the merger and
   records the stream's last-pushed end time.
   NOTE(review): line-sampled view — some early returns / braces are elided;
   code kept byte-identical. */
804 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
806 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
808 shared_ptr<Piece> piece = wp.lock ();
813 shared_ptr<AudioContent> content = piece->content->audio;
814 DCPOMATIC_ASSERT (content);
816 /* Compute time in the DCP */
817 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
818 /* And the end of this block in the DCP */
819 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
821 /* Remove anything that comes before the start or after the end of the content */
822 if (time < piece->content->position()) {
823 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
825 /* This audio is entirely discarded */
828 content_audio.audio = cut.first;
830 } else if (time > piece->content->end()) {
833 } else if (end > piece->content->end()) {
/* Block straddles the end of the content: keep only the leading frames */
834 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
835 if (remaining_frames == 0) {
838 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
839 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
840 content_audio.audio = cut;
843 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting */
847 if (content->gain() != 0) {
848 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
849 gain->apply_gain (content->gain ());
850 content_audio.audio = gain;
/* Remap to the film's channel count using the stream's mapping */
855 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
859 if (_audio_processor) {
860 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
865 _audio_merger.push (content_audio.audio, time);
866 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
867 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* A bitmap caption has started: apply the content's offsets and scales to its
   rectangle and register it with the active-captions set for its type.
   NOTE(review): line-sampled view; code kept byte-identical. */
871 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const CaptionContent> wc, ContentBitmapCaption subtitle)
873 shared_ptr<Piece> piece = wp.lock ();
874 shared_ptr<const CaptionContent> caption = wc.lock ();
875 if (!piece || !caption) {
879 /* Apply content's subtitle offsets */
880 subtitle.sub.rectangle.x += caption->x_offset ();
881 subtitle.sub.rectangle.y += caption->y_offset ();
883 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
884 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((caption->x_scale() - 1) / 2);
885 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((caption->y_scale() - 1) / 2);
887 /* Apply content's subtitle scale */
888 subtitle.sub.rectangle.width *= caption->x_scale ();
889 subtitle.sub.rectangle.height *= caption->y_scale ();
892 ps.image.push_back (subtitle.sub);
893 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
895 _active_captions[subtitle.type()].add_from (wc, ps, from);
/* A plain-text caption has started: apply the content's position offsets,
   express x/y scaling through the subtitle size and aspect adjust, stamp the
   in-time, and register with the active-captions set for its type.
   NOTE(review): line-sampled view — the declaration of `ps` and some braces
   are elided; code kept byte-identical. */
899 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const CaptionContent> wc, ContentTextCaption subtitle)
901 shared_ptr<Piece> piece = wp.lock ();
902 shared_ptr<const CaptionContent> caption = wc.lock ();
903 if (!piece || !caption) {
908 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
910 if (from > piece->content->end()) {
914 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
915 s.set_h_position (s.h_position() + caption->x_offset ());
916 s.set_v_position (s.v_position() + caption->y_offset ());
917 float const xs = caption->x_scale();
918 float const ys = caption->y_scale();
919 float size = s.size();
921 /* Adjust size to express the common part of the scaling;
922 e.g. if xs = ys = 0.5 we scale size by 2.
924 if (xs > 1e-5 && ys > 1e-5) {
925 size *= 1 / min (1 / xs, 1 / ys);
929 /* Then express aspect ratio changes */
930 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
931 s.set_aspect_adjust (xs / ys);
/* In-time in milliseconds on the DCP timeline */
934 s.set_in (dcp::Time(from.seconds(), 1000));
935 ps.text.push_back (TextCaption (s, caption->outline_width()));
936 ps.add_fonts (caption->fonts ());
939 _active_captions[subtitle.type()].add_from (wc, ps, from);
/* A caption of the given type has ended at content time `to`: close it in the
   active-captions set and, when it is not being burnt in, emit it via the
   Caption signal with its full DCP period.
   NOTE(review): line-sampled view; code kept byte-identical. */
943 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const CaptionContent> wc, ContentTime to, CaptionType type)
945 if (!_active_captions[type].have (wc)) {
949 shared_ptr<Piece> piece = wp.lock ();
950 shared_ptr<const CaptionContent> caption = wc.lock ();
951 if (!piece || !caption) {
955 DCPTime const dcp_to = content_time_to_dcp (piece, to);
957 if (dcp_to > piece->content->end()) {
961 pair<PlayerCaption, DCPTime> from = _active_captions[type].add_to (wc, dcp_to);
/* Burnt-in captions are composited onto the frame instead of being emitted */
963 bool const always = _always_burn_captions && *_always_burn_captions == type;
964 if (caption->use() && !always && !caption->burn()) {
965 Caption (from.first, type, DCPTimePeriod (from.second, dcp_to));
/* Seek the player to `time`.  `accurate` requests frame-accurate output after
   the seek.  Flushes/clears audio state and active captions, seeks each
   piece's decoder appropriately, resets the last-emitted-time bookkeeping and
   repositions the black/silence fillers.
   NOTE(review): line-sampled view — the accurate/inaccurate branch structure
   around lines 1007-1013 is partially elided; code kept byte-identical. */
970 Player::seek (DCPTime time, bool accurate)
972 if (!_have_valid_pieces) {
982 if (_audio_processor) {
983 _audio_processor->flush ();
986 _audio_merger.clear ();
987 for (int i = 0; i < CAPTION_COUNT; ++i) {
988 _active_captions[i].clear ();
991 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
992 if (time < i->content->position()) {
993 /* Before; seek to the start of the content */
994 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
996 } else if (i->content->position() <= time && time < i->content->end()) {
997 /* During; seek to position */
998 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1001 /* After; this piece is done */
/* Accurate seek: we know exactly where the next output will be */
1007 _last_video_time = time;
1008 _last_video_eyes = EYES_LEFT;
1009 _last_audio_time = time;
/* Inaccurate seek: positions are unknown until data arrives */
1011 _last_video_time = optional<DCPTime>();
1012 _last_video_eyes = optional<Eyes>();
1013 _last_audio_time = optional<DCPTime>();
1016 _black.set_position (time);
1017 _silent.set_position (time);
1019 _last_video.clear ();
/* Queue a video frame for emission (delayed so captions can arrive first),
   updating the last-video time/eyes, then emit the frame at the front of the
   queue once it holds enough entries.
   NOTE(review): line-sampled view — the pop of the delay queue is elided;
   code kept byte-identical. */
1023 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1025 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1026 player before the video that requires them.
1028 _delay.push_back (make_pair (pv, time));
/* Advance the clock only once a full frame (both eyes) has been queued */
1030 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1031 _last_video_time = time + one_video_frame();
1033 _last_video_eyes = increment_eyes (pv->eyes());
1035 if (_delay.size() < 3) {
1039 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1041 do_emit_video (to_do.first, to_do.second);

/* Actually emit a video frame: drop expired captions, then burn any captions
   active at `time` into the frame.  NOTE(review): the final Video signal
   emission is elided in this view; code kept byte-identical. */
1045 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1047 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1048 for (int i = 0; i < CAPTION_COUNT; ++i) {
1049 _active_captions[i].clear_before (time);
1053 optional<PositionImage> captions = captions_for_frame (time);
1055 pv->set_caption (captions.get ());
/* Emit a block of audio that must follow directly on from the previous one;
   asserts (and logs first) if it does not, then advances _last_audio_time.
   NOTE(review): line-sampled view — the Audio signal emission is elided;
   code kept byte-identical. */
1062 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1064 /* Log if the assert below is about to fail */
1065 if (_last_audio_time && time != *_last_audio_time) {
1066 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1069 /* This audio must follow on from the previous */
1070 DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1072 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());

/* Fill the given period with silence, emitted in blocks of at most 0.5s. */
1076 Player::fill_audio (DCPTimePeriod period)
1078 if (period.from == period.to) {
1082 DCPOMATIC_ASSERT (period.from < period.to);
1084 DCPTime t = period.from;
1085 while (t < period.to) {
1086 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1087 Frame const samples = block.frames_round(_film->audio_frame_rate());
1089 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1090 silence->make_silent ();
1091 emit_audio (silence, t);
/* Duration of a single video frame at the film's frame rate. */
1098 Player::one_video_frame () const
1100 return DCPTime::from_frames (1, _film->video_frame_rate ());

1103 pair<shared_ptr<AudioBuffers>, DCPTime>
/* Drop the part of `audio` (starting at `time`) that falls before `discard_to`;
   returns the remaining buffer and its new start time, or a null buffer when
   everything was discarded. */
1104 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1106 DCPTime const discard_time = discard_to - time;
1107 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1108 Frame remaining_frames = audio->frames() - discard_frames;
1109 if (remaining_frames <= 0) {
1110 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1112 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1113 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1114 return make_pair(cut, time + discard_time);
/* Set (or clear) the forced decode-resolution reduction for DCP content.
   No-op when unchanged; otherwise invalidates the pieces and notifies
   listeners. */
1118 Player::set_dcp_decode_reduction (optional<int> reduction)
1120 if (reduction == _dcp_decode_reduction) {
1124 _dcp_decode_reduction = reduction;
1125 _have_valid_pieces = false;
1126 Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Convert a ContentTime within the given Content to a DCP timeline time by
   finding the matching Piece; asserts if the content is not in the playlist.
   NOTE(review): line-sampled view — the _have_valid_pieces branch body and
   return type are elided; code kept byte-identical. */
1130 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1132 if (_have_valid_pieces) {
1136 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1137 if (i->content == content) {
1138 return content_time_to_dcp (i, t);
/* Must be in the playlist if _have_valid_pieces is true */
1142 DCPOMATIC_ASSERT (false);