2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "caption_content.h"
44 #include "caption_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
/* Identifiers passed as the first argument of the Player's Changed signal so
   that listeners can tell which aspect of the player's output has changed.
   Numbered from 700 — presumably to avoid colliding with the Content/Film
   property id ranges used elsewhere (TODO confirm against those headers). */
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Construct a Player which plays @param playlist in the context of @param film.
   NOTE(review): this listing has elided lines — e.g. the _film initialiser and
   the opening brace are not visible; comments annotate only what is shown. */
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 , _playlist (playlist)
90 , _have_valid_pieces (false)
91 , _ignore_video (false)
92 , _ignore_caption (false)
94 , _play_referenced (false)
95 , _audio_merger (_film->audio_frame_rate())
/* Re-emit our own Changed when the film or playlist changes under us */
98 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
99 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
100 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
101 set_video_container_size (_film->frame_size ());
/* Force initial set-up of the audio processor (film_changed handles AUDIO_PROCESSOR) */
103 film_changed (Film::AUDIO_PROCESSOR);
/* Start decoding from the beginning with an accurate seek */
105 seek (DCPTime (), true);
/* (Re)build the list of Pieces (content + decoder + frame-rate-change) from the
   playlist, wire each decoder's output signals into this Player, and reset the
   black/silence fillers and emission clocks.  Called lazily whenever
   _have_valid_pieces is false. */
114 Player::setup_pieces ()
/* A fresh Shuffler for re-ordering 3D left/right eye data */
119 _shuffler = new Shuffler();
120 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
122 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files have gone missing */
124 if (!i->paths_valid ()) {
128 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
129 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
132 /* Not something that we can decode; e.g. Atmos content */
/* Honour the ignore flags set by set_ignore_video()/set_ignore_caption() */
136 if (decoder->video && _ignore_video) {
137 decoder->video->set_ignore (true);
140 if (_ignore_caption) {
141 BOOST_FOREACH (shared_ptr<CaptionDecoder> i, decoder->caption) {
142 i->set_ignore (true);
/* DCP decoders need to know whether referenced assets should really be decoded */
146 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
148 dcp->set_decode_referenced (_play_referenced);
149 if (_play_referenced) {
150 dcp->set_forced_reduction (_dcp_decode_reduction);
154 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
155 _pieces.push_back (piece);
/* Route decoder output into this Player; weak_ptr<Piece> so a dead piece is ignored */
157 if (decoder->video) {
158 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
159 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
160 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
162 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
166 if (decoder->audio) {
167 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Caption decoders emit bitmap starts, plain-text starts and stops separately */
170 list<shared_ptr<CaptionDecoder> >::const_iterator j = decoder->caption.begin();
172 while (j != decoder->caption.end()) {
173 (*j)->BitmapStart.connect (
174 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const CaptionContent>((*j)->content()), _1)
176 (*j)->PlainStart.connect (
177 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const CaptionContent>((*j)->content()), _1)
180 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const CaptionContent>((*j)->content()), _1, _2)
/* Track per-audio-stream push positions so pass() knows how far audio is complete */
187 _stream_states.clear ();
188 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
189 if (i->content->audio) {
190 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
191 _stream_states[j] = StreamState (i, i->content->position ());
/* Empty-period trackers: where the timeline has no video / no audio we emit
   black frames / silence respectively */
196 _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
197 _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
/* Reset emission clocks to the start of the DCP */
199 _last_video_time = DCPTime ();
200 _last_video_eyes = EYES_BOTH;
201 _last_audio_time = DCPTime ();
202 _have_valid_pieces = true;
/* Called when a property of some Content in the playlist changes.
   Two classes of property are distinguished: the first group invalidates the
   Piece list (decoders must be rebuilt); the second only alters rendering, so
   we just re-emit Changed without tearing the pieces down. */
206 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
208 shared_ptr<Content> c = w.lock ();
/* Properties which require new pieces (positions, paths, streams, decoders...) */
214 property == ContentProperty::POSITION ||
215 property == ContentProperty::LENGTH ||
216 property == ContentProperty::TRIM_START ||
217 property == ContentProperty::TRIM_END ||
218 property == ContentProperty::PATH ||
219 property == VideoContentProperty::FRAME_TYPE ||
220 property == VideoContentProperty::COLOUR_CONVERSION ||
221 property == AudioContentProperty::STREAMS ||
222 property == DCPContentProperty::NEEDS_ASSETS ||
223 property == DCPContentProperty::NEEDS_KDM ||
224 property == CaptionContentProperty::COLOUR ||
225 property == CaptionContentProperty::EFFECT ||
226 property == CaptionContentProperty::EFFECT_COLOUR ||
227 property == FFmpegContentProperty::SUBTITLE_STREAM ||
228 property == FFmpegContentProperty::FILTERS
231 _have_valid_pieces = false;
232 Changed (property, frequent);
/* Properties which only change how we render, not the pieces themselves */
235 property == CaptionContentProperty::LINE_SPACING ||
236 property == CaptionContentProperty::OUTLINE_WIDTH ||
237 property == CaptionContentProperty::Y_SCALE ||
238 property == CaptionContentProperty::FADE_IN ||
239 property == CaptionContentProperty::FADE_OUT ||
240 property == ContentProperty::VIDEO_FRAME_RATE ||
241 property == CaptionContentProperty::USE ||
242 property == CaptionContentProperty::X_OFFSET ||
243 property == CaptionContentProperty::Y_OFFSET ||
244 property == CaptionContentProperty::X_SCALE ||
245 property == CaptionContentProperty::FONTS ||
246 property == CaptionContentProperty::TYPE ||
247 property == VideoContentProperty::CROP ||
248 property == VideoContentProperty::SCALE ||
249 property == VideoContentProperty::FADE_IN ||
250 property == VideoContentProperty::FADE_OUT
253 Changed (property, frequent);
/* Set the size of the "container" into which all video is letterboxed, and
   rebuild the cached black frame to match.  No-op if the size is unchanged. */
258 Player::set_video_container_size (dcp::Size s)
260 if (s == _video_container_size) {
264 _video_container_size = s;
/* Black filler frame at the new size, used by black_player_video_frame() */
266 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
267 _black_image->make_black ();
269 Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Playlist structure changed: invalidate pieces and tell listeners. */
273 Player::playlist_changed ()
275 _have_valid_pieces = false;
276 Changed (PlayerProperty::PLAYLIST, false);
/* React to a change of a Film-level property @param p that affects our output. */
280 Player::film_changed (Film::Property p)
282 /* Here we should notice Film properties that affect our output, and
283 alert listeners that our output now would be different to how it was
284 last time we were run.
287 if (p == Film::CONTAINER) {
288 Changed (PlayerProperty::FILM_CONTAINER, false);
289 } else if (p == Film::VIDEO_FRAME_RATE) {
290 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
291 so we need new pieces here.
293 _have_valid_pieces = false;
294 Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
295 } else if (p == Film::AUDIO_PROCESSOR) {
/* Clone the film's processor at our sample rate (elided branch presumably
   resets _audio_processor when the film has none — TODO confirm) */
296 if (_film->audio_processor ()) {
297 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
299 } else if (p == Film::AUDIO_CHANNELS) {
/* Channel count changed; pending mixed audio is no longer valid */
300 _audio_merger.clear ();
/* Convert bitmap captions (image + fractional on-screen rectangle) into
   PositionImages scaled and positioned for _video_container_size.
   NOTE(review): the scaling call between the size computation and the
   position computation is elided in this listing. */
305 Player::transform_bitmap_captions (list<BitmapCaption> subs) const
307 list<PositionImage> all;
309 for (list<BitmapCaption>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
314 /* We will scale the subtitle up to fit _video_container_size */
315 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
321 dcp::YUV_TO_RGB_REC601,
322 i->image->pixel_format (),
/* Fractional rectangle origin -> integer pixel position in the container */
327 lrint (_video_container_size.width * i->rectangle.x),
328 lrint (_video_container_size.height * i->rectangle.y)
/* @return a PlayerVideo wrapping the cached black frame, for the given eyes,
   used to fill periods of the timeline with no video. */
337 shared_ptr<PlayerVideo>
338 Player::black_player_video_frame (Eyes eyes) const
340 return shared_ptr<PlayerVideo> (
342 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
345 _video_container_size,
346 _video_container_size,
/* Colour conversion is arbitrary for an all-black frame */
349 PresetColourConversion::all().front().conversion,
/* No originating content and no frame number for synthetic black */
350 boost::weak_ptr<Content>(),
351 boost::optional<Frame>()
/* Convert DCPTime @param t to a video frame index within @param piece's content,
   accounting for position, trim and the piece's frame-rate change. */
357 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
359 DCPTime s = t - piece->content->position ();
360 s = min (piece->content->length_after_trim(), s);
361 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
363 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
364 then convert that ContentTime to frames at the content's rate. However this fails for
365 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
366 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
368 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
370 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: content video frame -> DCPTime. */
374 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
376 /* See comment in dcp_to_content_video */
377 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
378 return d + piece->content->position();
/* DCPTime -> audio frame count at the film's (resampled) audio rate. */
382 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
384 DCPTime s = t - piece->content->position ();
385 s = min (piece->content->length_after_trim(), s);
386 /* See notes in dcp_to_content_video */
387 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse: resampled audio frame -> DCPTime. */
391 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
393 /* See comment in dcp_to_content_video */
394 return DCPTime::from_frames (f, _film->audio_frame_rate())
395 - DCPTime (piece->content->trim_start(), piece->frc)
396 + piece->content->position();
/* DCPTime -> ContentTime within the piece (applies position and trim). */
400 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
402 DCPTime s = t - piece->content->position ();
403 s = min (piece->content->length_after_trim(), s);
404 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* ContentTime within the piece -> DCPTime (inverse of dcp_to_content_time). */
408 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
410 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts declared by every caption content in the playlist,
   rebuilding pieces first if they are stale. */
413 list<shared_ptr<Font> >
414 Player::get_subtitle_fonts ()
416 if (!_have_valid_pieces) {
420 list<shared_ptr<Font> > fonts;
421 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
422 BOOST_FOREACH (shared_ptr<CaptionContent> j, i->content->caption) {
423 /* XXX: things may go wrong if there are duplicate font IDs
424 with different font files.
426 list<shared_ptr<Font> > f = j->fonts ();
427 copy (f.begin(), f.end(), back_inserter (fonts));
434 /** Set this player never to produce any video data */
436 Player::set_ignore_video ()
438 _ignore_video = true;
/* Set this player never to produce caption data */
442 Player::set_ignore_caption ()
444 _ignore_caption = true;
447 /** Set the player to always burn open captions into the image regardless of the content settings */
449 Player::set_always_burn_open_captions ()
451 _always_burn_open_captions = true;
454 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the set_fast() signature and its _fast = true line are elided
   in this listing; only the piece invalidation is visible. */
459 _have_valid_pieces = false;
/* Play referenced DCP assets (rather than skipping them); needs new pieces */
463 Player::set_play_referenced ()
465 _play_referenced = true;
466 _have_valid_pieces = false;
/* Build the list of reel assets (picture/sound/subtitle/closed-caption) that
   are referenced from DCP content in the playlist, with their entry points and
   durations adjusted for the content's trim, and the DCP-timeline period each
   occupies. */
469 list<ReferencedReelAsset>
470 Player::get_reel_assets ()
472 list<ReferencedReelAsset> a;
474 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can reference reel assets */
475 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
/* Fresh, non-fast decoder just to enumerate the reels */
480 scoped_ptr<DCPDecoder> decoder;
482 decoder.reset (new DCPDecoder (j, _film->log(), false));
488 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
/* Trims are expressed in content frames at the content (cfr) rate */
490 DCPOMATIC_ASSERT (j->video_frame_rate ());
491 double const cfr = j->video_frame_rate().get();
492 Frame const trim_start = j->trim_start().frames_round (cfr);
493 Frame const trim_end = j->trim_end().frames_round (cfr);
494 int const ffr = _film->video_frame_rate ();
/* `offset' (accumulated below) is the start of this reel within the DCP content */
496 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
497 if (j->reference_video ()) {
498 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
499 DCPOMATIC_ASSERT (ra);
/* Push the entry point past the trimmed head; shorten by both trims */
500 ra->set_entry_point (ra->entry_point() + trim_start);
501 ra->set_duration (ra->duration() - trim_start - trim_end);
503 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
507 if (j->reference_audio ()) {
508 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
509 DCPOMATIC_ASSERT (ra);
510 ra->set_entry_point (ra->entry_point() + trim_start);
511 ra->set_duration (ra->duration() - trim_start - trim_end);
513 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
517 if (j->reference_caption (CAPTION_OPEN)) {
518 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
519 DCPOMATIC_ASSERT (ra);
520 ra->set_entry_point (ra->entry_point() + trim_start);
521 ra->set_duration (ra->duration() - trim_start - trim_end);
523 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
527 if (j->reference_caption (CAPTION_CLOSED)) {
528 shared_ptr<dcp::ReelAsset> ra = k->closed_caption ();
529 DCPOMATIC_ASSERT (ra);
530 ra->set_entry_point (ra->entry_point() + trim_start);
531 ra->set_duration (ra->duration() - trim_start - trim_end);
533 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
537 /* Assume that main picture duration is the length of the reel */
538 offset += k->main_picture()->duration ();
/* One "pass" of the player: make the most-behind decoder (or the black/silence
   filler) emit some data, then flush any audio that is known to be complete and
   any video whose subtitle-delay window has elapsed.
   NOTE(review): the function signature and several else/brace lines are elided
   in this listing. */
548 if (!_have_valid_pieces) {
552 if (_playlist->length() == DCPTime()) {
553 /* Special case of an empty Film; just give one black frame */
554 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
558 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
560 shared_ptr<Piece> earliest_content;
561 optional<DCPTime> earliest_time;
563 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Decoder position clamped to the start of trim, expressed on the DCP timeline */
568 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
569 if (t > i->content->end()) {
573 /* Given two choices at the same time, pick the one with captions so we see it before
576 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->caption.empty())) {
578 earliest_content = i;
592 if (earliest_content) {
/* Black/silence fillers compete with content for "what is farthest behind" */
596 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
597 earliest_time = _black.position ();
601 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
602 earliest_time = _silent.position ();
/* Advance whichever source was chosen (content / black / silence) */
608 earliest_content->done = earliest_content->decoder->pass ();
611 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
612 _black.set_position (_black.position() + one_video_frame());
616 DCPTimePeriod period (_silent.period_at_position());
617 if (_last_audio_time) {
618 /* Sometimes the thing that happened last finishes fractionally before
619 this silence. Bodge the start time of the silence to fix it. I'm
620 not sure if this is the right solution --- maybe the last thing should
621 be padded `forward' rather than this thing padding `back'.
623 period.from = min(period.from, *_last_audio_time);
/* Emit at most one video frame's worth of silence per pass */
625 if (period.duration() > one_video_frame()) {
626 period.to = period.from + one_video_frame();
629 _silent.set_position (period.to);
637 /* Emit any audio that is ready */
639 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
640 of our streams, or the position of the _silent.
642 DCPTime pull_to = _film->length ();
643 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
644 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
645 pull_to = i->second.last_push_end;
648 if (!_silent.done() && _silent.position() < pull_to) {
649 pull_to = _silent.position();
652 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
653 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
654 if (_last_audio_time && i->second < *_last_audio_time) {
655 /* This new data comes before the last we emitted (or the last seek); discard it */
656 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
661 } else if (_last_audio_time && i->second > *_last_audio_time) {
662 /* There's a gap between this data and the last we emitted; fill with silence */
663 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
666 emit_audio (i->first, i->second);
/* Presumably on end-of-pass we flush the delayed-video queue — TODO confirm
   against the elided surrounding condition */
671 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
672 do_emit_video(i->first, i->second);
/* @return the active closed captions overlapping the single video frame at
   @param time. */
680 Player::closed_captions_for_frame (DCPTime time) const
682 return _active_captions[CAPTION_CLOSED].get (
683 DCPTimePeriod(time, time + DCPTime::from_frames(1, _film->video_frame_rate()))
687 /** @return Open captions for the frame at the given time, converted to images */
688 optional<PositionImage>
689 Player::open_captions_for_frame (DCPTime time) const
691 list<PositionImage> captions;
692 int const vfr = _film->video_frame_rate();
/* Only captions marked for burn-in (or everything, if _always_burn_open_captions) */
696 _active_captions[CAPTION_OPEN].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_captions)
699 /* Image subtitles */
700 list<PositionImage> c = transform_bitmap_captions (j.image);
701 copy (c.begin(), c.end(), back_inserter (captions));
703 /* Text subtitles (rendered to an image) */
704 if (!j.text.empty ()) {
705 list<PositionImage> s = render_text (j.text, j.fonts, _video_container_size, time, vfr);
706 copy (s.begin(), s.end(), back_inserter (captions));
710 if (captions.empty ()) {
711 return optional<PositionImage> ();
/* Flatten everything into a single image for compositing over the frame */
714 return merge (captions);
/* Handler for video data arriving from a piece's decoder: converts the content
   frame to DCP time, discards out-of-range frames, fills any gap since the last
   emission (repeating the previous frame or using black, per-eye in 3D), then
   wraps the frame in a PlayerVideo and emits it (repeated as the frame-rate
   change requires). */
718 Player::video (weak_ptr<Piece> wp, ContentVideo video)
720 shared_ptr<Piece> piece = wp.lock ();
725 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
/* When skipping (content faster than DCP) drop every other frame */
726 if (frc.skip && (video.frame % 2) == 1) {
730 /* Time of the first frame we will emit */
731 DCPTime const time = content_video_to_dcp (piece, video.frame);
733 /* Discard if it's before the content's period or the last accurate seek. We can't discard
734 if it's after the content's period here as in that case we still need to fill any gap between
735 `now' and the end of the content's period.
737 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
741 /* Fill gaps that we discover now that we have some video which needs to be emitted.
742 This is where we need to fill to.
744 DCPTime fill_to = min (time, piece->content->end());
746 if (_last_video_time) {
747 DCPTime fill_from = max (*_last_video_time, piece->content->position());
/* Last frame we emitted for this piece, if any, to repeat into the gap */
748 LastVideoMap::const_iterator last = _last_video.find (wp);
749 if (_film->three_d()) {
750 DCPTime j = fill_from;
751 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
752 if (eyes == EYES_BOTH) {
/* Fill eye-by-eye until we reach both the target time and the correct eye */
755 while (j < fill_to || eyes != video.eyes) {
756 if (last != _last_video.end()) {
757 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
758 copy->set_eyes (eyes);
759 emit_video (copy, j);
761 emit_video (black_player_video_frame(eyes), j);
/* Time only advances after the right eye of each stereo pair */
763 if (eyes == EYES_RIGHT) {
764 j += one_video_frame();
766 eyes = increment_eyes (eyes);
769 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
770 if (last != _last_video.end()) {
771 emit_video (last->second, j);
773 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Remember this frame (per piece) so future gaps can repeat it */
779 _last_video[wp].reset (
782 piece->content->video->crop (),
783 piece->content->video->fade (video.frame),
784 piece->content->video->scale().size (
785 piece->content->video, _video_container_size, _film->frame_size ()
787 _video_container_size,
790 piece->content->video->colour_conversion(),
/* Emit the frame, repeated as the frame-rate change requires, clamped to content end */
797 for (int i = 0; i < frc.repeat; ++i) {
798 if (t < piece->content->end()) {
799 emit_video (_last_video[wp], t);
801 t += one_video_frame ();
/* Handler for audio data arriving from a piece's decoder: places the block on
   the DCP timeline, trims anything outside the content's period, applies gain,
   remapping and the optional audio processor, then pushes it to the merger and
   records how far this stream has been pushed. */
806 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
808 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
810 shared_ptr<Piece> piece = wp.lock ();
815 shared_ptr<AudioContent> content = piece->content->audio;
816 DCPOMATIC_ASSERT (content);
818 /* Compute time in the DCP */
819 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
820 /* And the end of this block in the DCP */
821 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
823 /* Remove anything that comes before the start or after the end of the content */
824 if (time < piece->content->position()) {
825 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
827 /* This audio is entirely discarded */
830 content_audio.audio = cut.first;
832 } else if (time > piece->content->end()) {
/* Whole block starts after the content has finished: drop it */
835 } else if (end > piece->content->end()) {
/* Block straddles the end of the content: keep only the leading part */
836 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
837 if (remaining_frames == 0) {
840 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
841 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
842 content_audio.audio = cut;
845 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply content gain (dB), if any */
849 if (content->gain() != 0) {
850 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
851 gain->apply_gain (content->gain ());
852 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout */
857 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
861 if (_audio_processor) {
862 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Queue for mixing, and note how far this stream is now complete */
867 _audio_merger.push (content_audio.audio, time);
868 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
869 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for the start of a bitmap caption: applies the content's offset and
   scale settings to the caption rectangle and registers it as active from the
   corresponding DCP time. */
873 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const CaptionContent> wc, ContentBitmapCaption subtitle)
875 shared_ptr<Piece> piece = wp.lock ();
876 shared_ptr<const CaptionContent> caption = wc.lock ();
877 if (!piece || !caption) {
881 /* Apply content's subtitle offsets */
882 subtitle.sub.rectangle.x += caption->x_offset ();
883 subtitle.sub.rectangle.y += caption->y_offset ();
885 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
886 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((caption->x_scale() - 1) / 2);
887 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((caption->y_scale() - 1) / 2);
889 /* Apply content's subtitle scale */
890 subtitle.sub.rectangle.width *= caption->x_scale ();
891 subtitle.sub.rectangle.height *= caption->y_scale ();
/* Record as active until the matching subtitle_stop arrives */
894 ps.image.push_back (subtitle.sub);
895 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
897 _active_captions[subtitle.type()].add_from (wc, ps, from);
/* Handler for the start of a plain-text caption: applies the content's offset,
   scale and aspect settings to each SubtitleString and registers the caption as
   active from the corresponding DCP time. */
901 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const CaptionContent> wc, ContentTextCaption subtitle)
903 shared_ptr<Piece> piece = wp.lock ();
904 shared_ptr<const CaptionContent> caption = wc.lock ();
905 if (!piece || !caption) {
910 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore captions which start after the content has been trimmed off */
912 if (from > piece->content->end()) {
916 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
917 s.set_h_position (s.h_position() + caption->x_offset ());
918 s.set_v_position (s.v_position() + caption->y_offset ());
919 float const xs = caption->x_scale();
920 float const ys = caption->y_scale();
921 float size = s.size();
923 /* Adjust size to express the common part of the scaling;
924 e.g. if xs = ys = 0.5 we scale size by 2.
926 if (xs > 1e-5 && ys > 1e-5) {
927 size *= 1 / min (1 / xs, 1 / ys);
931 /* Then express aspect ratio changes */
932 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
933 s.set_aspect_adjust (xs / ys);
/* dcp::Time with editable-unit 1000 (milliseconds) */
936 s.set_in (dcp::Time(from.seconds(), 1000));
937 ps.text.push_back (TextCaption (s, caption->outline_width()));
938 ps.add_fonts (caption->fonts ());
941 _active_captions[subtitle.type()].add_from (wc, ps, from);
/* Handler for the end of a caption: closes the active caption for @param wc at
   DCP time @param to and, for captions which are not burnt in, emits it via the
   Caption signal. */
945 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const CaptionContent> wc, ContentTime to, CaptionType type)
/* Nothing to stop if we never saw the matching start */
947 if (!_active_captions[type].have (wc)) {
951 shared_ptr<Piece> piece = wp.lock ();
952 shared_ptr<const CaptionContent> caption = wc.lock ();
953 if (!piece || !caption) {
957 DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ignore stops past the end of the (possibly trimmed) content */
959 if (dcp_to > piece->content->end()) {
963 pair<PlayerCaption, DCPTime> from = _active_captions[type].add_to (wc, dcp_to);
/* Burnt-in captions are composited in do_emit_video, not emitted as data */
965 bool const always = type == CAPTION_OPEN && _always_burn_open_captions;
966 if (caption->use() && !always && !caption->burn()) {
967 Caption (from.first, type, DCPTimePeriod (from.second, dcp_to));
/* Seek the player to @param time.  @param accurate true to get exactly the
   frame at `time'; false allows decoders to land on a nearby convenient frame.
   Flushes all pending audio/caption state and repositions every decoder. */
972 Player::seek (DCPTime time, bool accurate)
974 if (!_have_valid_pieces) {
/* Drop any buffered/derived state from the previous play position */
984 if (_audio_processor) {
985 _audio_processor->flush ();
988 _audio_merger.clear ();
989 for (int i = 0; i < CAPTION_COUNT; ++i) {
990 _active_captions[i].clear ();
993 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
994 if (time < i->content->position()) {
995 /* Before; seek to the start of the content */
996 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
998 } else if (i->content->position() <= time && time < i->content->end()) {
999 /* During; seek to position */
1000 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1003 /* After; this piece is done */
/* For an accurate seek the emission clocks restart exactly at `time'; an
   inaccurate seek leaves them unset until data arrives (elided else branch) */
1009 _last_video_time = time;
1010 _last_video_eyes = EYES_LEFT;
1011 _last_audio_time = time;
1013 _last_video_time = optional<DCPTime>();
1014 _last_video_eyes = optional<Eyes>();
1015 _last_audio_time = optional<DCPTime>();
1018 _black.set_position (time);
1019 _silent.set_position (time);
/* Forget per-piece last frames; they are no longer adjacent to the new position */
1021 _last_video.clear ();
/* Queue a video frame for emission, delaying it a couple of frames so that any
   captions needed for it have time to arrive; advances the video clock. */
1025 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1027 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1028 player before the video that requires them.
1030 _delay.push_back (make_pair (pv, time));
/* The clock only moves on once both eyes (or a 2D frame) have been emitted */
1032 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1033 _last_video_time = time + one_video_frame();
1035 _last_video_eyes = increment_eyes (pv->eyes());
/* Keep up to three frames in the delay queue before releasing the oldest */
1037 if (_delay.size() < 3) {
1041 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1043 do_emit_video (to_do.first, to_do.second);
/* Actually emit a (possibly delayed) frame: expire old captions, burn in any
   open captions for this frame, then signal Video (signal emission elided). */
1047 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1049 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1050 for (int i = 0; i < CAPTION_COUNT; ++i) {
1051 _active_captions[i].clear_before (time);
1055 optional<PositionImage> captions = open_captions_for_frame (time);
1057 pv->set_caption (captions.get ());
/* Emit an audio block at @param time.  Audio must be emitted contiguously;
   the assert enforces that, with a log line beforehand to aid debugging. */
1064 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1066 /* Log if the assert below is about to fail */
1067 if (_last_audio_time && time != *_last_audio_time) {
1068 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1071 /* This audio must follow on from the previous */
1072 DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
/* Advance the audio clock past this block (Audio signal emission elided) */
1074 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence covering @param period, in blocks of at most half a second. */
1078 Player::fill_audio (DCPTimePeriod period)
1080 if (period.from == period.to) {
1084 DCPOMATIC_ASSERT (period.from < period.to);
1086 DCPTime t = period.from;
1087 while (t < period.to) {
1088 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1089 Frame const samples = block.frames_round(_film->audio_frame_rate());
/* Elided guard presumably skips zero-sample blocks — TODO confirm */
1091 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1092 silence->make_silent ();
1093 emit_audio (silence, t);
/* @return the duration of one video frame at the film's rate. */
1100 Player::one_video_frame () const
1102 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Discard the part of @param audio before @param discard_to (audio starts at
   @param time).  @return the remaining buffer and its new start time; an empty
   pair if everything was discarded. */
1105 pair<shared_ptr<AudioBuffers>, DCPTime>
1106 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1108 DCPTime const discard_time = discard_to - time;
1109 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1110 Frame remaining_frames = audio->frames() - discard_frames;
1111 if (remaining_frames <= 0) {
1112 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1114 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1115 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1116 return make_pair(cut, time + discard_time);
/* Set the JPEG2000 decode resolution reduction for DCP content (none = full
   resolution).  Changing it requires new pieces, so decoders pick it up. */
1120 Player::set_dcp_decode_reduction (optional<int> reduction)
/* No-op if unchanged, to avoid needless piece rebuilds */
1122 if (reduction == _dcp_decode_reduction) {
1126 _dcp_decode_reduction = reduction;
1127 _have_valid_pieces = false;
1128 Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);
1132 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1134 if (_have_valid_pieces) {
1138 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1139 if (i->content == content) {
1140 return content_time_to_dcp (i, t);
1144 DCPOMATIC_ASSERT (false);