/*
    Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "caption_content.h"
44 #include "caption_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <boost/foreach.hpp>
62 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
/* Identifiers passed to the Changed signal so observers can tell which
   aspect of the player's output has changed. */
80 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
81 int const PlayerProperty::PLAYLIST = 701;
82 int const PlayerProperty::FILM_CONTAINER = 702;
83 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
84 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Construct a Player for @param film and @param playlist: connect to the film
   and playlist change signals, size the video container from the film, set up
   the audio processor (via film_changed) and seek to the start. */
86 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
88 , _playlist (playlist)
89 , _have_valid_pieces (false)
90 , _ignore_video (false)
91 , _ignore_subtitle (false)
93 , _play_referenced (false)
94 , _audio_merger (_film->audio_frame_rate())
97 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
98 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
99 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
100 set_video_container_size (_film->frame_size ());
/* AUDIO_PROCESSOR is passed so the handler (re)creates _audio_processor */
102 film_changed (Film::AUDIO_PROCESSOR);
/* accurate=true seek to time zero to initialise decoder positions */
104 seek (DCPTime (), true);
/* (Re)build _pieces from the playlist: create a decoder for each content item,
   wire decoder signals (video/audio/captions) to our handlers, rebuild the
   per-stream state map and the black/silence "Empty" trackers, and reset the
   last-emitted time bookkeeping. */
113 Player::setup_pieces ()
/* Shuffler re-orders 3D L/R frames that may arrive out of sequence */
118 _shuffler = new Shuffler();
119 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
121 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* skip content whose files are missing — presumably a `continue` follows; original elided */
123 if (!i->paths_valid ()) {
127 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
128 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
131 /* Not something that we can decode; e.g. Atmos content */
135 if (decoder->video && _ignore_video) {
136 decoder->video->set_ignore (true);
139 if (decoder->caption && _ignore_subtitle) {
140 decoder->caption->set_ignore (true);
/* Extra configuration for DCP content: referenced-reel playback and decode reduction */
143 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
145 dcp->set_decode_referenced (_play_referenced);
146 if (_play_referenced) {
147 dcp->set_forced_reduction (_dcp_decode_reduction);
151 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
152 _pieces.push_back (piece);
/* weak_ptr<Piece> in the bindings avoids a Piece <-> decoder ownership cycle */
154 if (decoder->video) {
155 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
156 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
157 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
159 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
163 if (decoder->audio) {
164 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
167 if (decoder->caption) {
168 decoder->caption->BitmapStart.connect (
169 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<CaptionContent>(piece->content->caption), _1)
171 decoder->caption->PlainStart.connect (
172 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<CaptionContent>(piece->content->caption), _1)
174 decoder->caption->Stop.connect (
175 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<CaptionContent>(piece->content->caption), _1, _2)
/* Map each audio stream to the piece that owns it, starting at the content position */
180 _stream_states.clear ();
181 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
182 if (i->content->audio) {
183 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
184 _stream_states[j] = StreamState (i, i->content->position ());
/* Trackers for the gaps in the timeline that need black video / silent audio */
189 _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
190 _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
192 _last_video_time = DCPTime ();
193 _last_video_eyes = EYES_BOTH;
194 _last_audio_time = DCPTime ();
195 _have_valid_pieces = true;
/* Handle a change to a property of some content in the playlist.
   @param w the content that changed (may have expired)
   @param property which property changed
   @param frequent true if this change is one of a rapid series (forwarded to Changed).
   Properties in the first group invalidate _pieces (decoders must be rebuilt);
   those in the second only alter presentation, so just re-emit Changed. */
199 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
201 shared_ptr<Content> c = w.lock ();
/* Changes that affect decoding/timing: pieces must be rebuilt */
207 property == ContentProperty::POSITION ||
208 property == ContentProperty::LENGTH ||
209 property == ContentProperty::TRIM_START ||
210 property == ContentProperty::TRIM_END ||
211 property == ContentProperty::PATH ||
212 property == VideoContentProperty::FRAME_TYPE ||
213 property == VideoContentProperty::COLOUR_CONVERSION ||
214 property == AudioContentProperty::STREAMS ||
215 property == DCPContentProperty::NEEDS_ASSETS ||
216 property == DCPContentProperty::NEEDS_KDM ||
217 property == CaptionContentProperty::COLOUR ||
218 property == CaptionContentProperty::EFFECT ||
219 property == CaptionContentProperty::EFFECT_COLOUR ||
220 property == FFmpegContentProperty::SUBTITLE_STREAM ||
221 property == FFmpegContentProperty::FILTERS
224 _have_valid_pieces = false;
225 Changed (property, frequent);
/* Changes that only affect how output is presented: no rebuild needed */
228 property == CaptionContentProperty::LINE_SPACING ||
229 property == CaptionContentProperty::OUTLINE_WIDTH ||
230 property == CaptionContentProperty::Y_SCALE ||
231 property == CaptionContentProperty::FADE_IN ||
232 property == CaptionContentProperty::FADE_OUT ||
233 property == ContentProperty::VIDEO_FRAME_RATE ||
234 property == CaptionContentProperty::USE ||
235 property == CaptionContentProperty::X_OFFSET ||
236 property == CaptionContentProperty::Y_OFFSET ||
237 property == CaptionContentProperty::X_SCALE ||
238 property == CaptionContentProperty::FONTS ||
239 property == CaptionContentProperty::TYPE ||
240 property == VideoContentProperty::CROP ||
241 property == VideoContentProperty::SCALE ||
242 property == VideoContentProperty::FADE_IN ||
243 property == VideoContentProperty::FADE_OUT
246 Changed (property, frequent);
/* Set the size of the container into which output video will be scaled.
   No-op if unchanged; otherwise rebuild the cached black frame at the new
   size and notify observers. */
251 Player::set_video_container_size (dcp::Size s)
253 if (s == _video_container_size) {
257 _video_container_size = s;
259 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
260 _black_image->make_black ();
262 Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Playlist composition changed: invalidate pieces and tell observers. */
266 Player::playlist_changed ()
268 _have_valid_pieces = false;
269 Changed (PlayerProperty::PLAYLIST, false);
/* React to a change in a Film property that affects our output.
   @param p the property that changed. */
273 Player::film_changed (Film::Property p)
275 /* Here we should notice Film properties that affect our output, and
276 alert listeners that our output now would be different to how it was
277 last time we were run.
280 if (p == Film::CONTAINER) {
281 Changed (PlayerProperty::FILM_CONTAINER, false);
282 } else if (p == Film::VIDEO_FRAME_RATE) {
283 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
284 so we need new pieces here.
286 _have_valid_pieces = false;
287 Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
288 } else if (p == Film::AUDIO_PROCESSOR) {
/* Clone the film's audio processor at our output rate (if one is set) */
289 if (_film->audio_processor ()) {
290 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
292 } else if (p == Film::AUDIO_CHANNELS) {
/* Channel count change invalidates any audio queued in the merger */
293 _audio_merger.clear ();
/* Convert a list of bitmap captions into PositionImages scaled and positioned
   for _video_container_size.  The caption rectangles are fractional (0..1) of
   the container, hence the multiplications below.
   NOTE(review): several lines of this body (the scale call and list append)
   are elided in this view — confirm against the full source. */
298 Player::transform_bitmap_captions (list<BitmapCaption> subs) const
300 list<PositionImage> all;
302 for (list<BitmapCaption>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
307 /* We will scale the subtitle up to fit _video_container_size */
308 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
314 dcp::YUV_TO_RGB_REC601,
315 i->image->pixel_format (),
/* Position is also a fraction of the container, converted to pixels */
320 lrint (_video_container_size.width * i->rectangle.x),
321 lrint (_video_container_size.height * i->rectangle.y)
/* Make a PlayerVideo wrapping the cached black frame, for the given eyes
   (used to fill gaps in the timeline with black). */
330 shared_ptr<PlayerVideo>
331 Player::black_player_video_frame (Eyes eyes) const
333 return shared_ptr<PlayerVideo> (
335 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
338 _video_container_size,
339 _video_container_size,
342 PresetColourConversion::all().front().conversion,
/* No originating content or frame index for synthesised black */
343 boost::weak_ptr<Content>(),
344 boost::optional<Frame>()
/* Convert a DCP time to a video frame index within @param piece's content,
   clamping to the content's trimmed extent. */
350 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
352 DCPTime s = t - piece->content->position ();
353 s = min (piece->content->length_after_trim(), s);
354 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
356 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
357 then convert that ContentTime to frames at the content's rate. However this fails for
358 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
359 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
361 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
363 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: convert content video frame @param f to DCP time. */
367 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
369 /* See comment in dcp_to_content_video */
370 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
371 return d + piece->content->position();
/* Convert a DCP time to an audio frame index (at the film's audio rate)
   within @param piece's content, clamped to the trimmed extent. */
375 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
377 DCPTime s = t - piece->content->position ();
378 s = min (piece->content->length_after_trim(), s);
379 /* See notes in dcp_to_content_video */
380 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: convert resampled audio frame @param f
   to DCP time. */
384 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
386 /* See comment in dcp_to_content_video */
387 return DCPTime::from_frames (f, _film->audio_frame_rate())
388 - DCPTime (piece->content->trim_start(), piece->frc)
389 + piece->content->position();
/* Convert a DCP time to a ContentTime within @param piece, clamped to the
   trimmed extent. */
393 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
395 DCPTime s = t - piece->content->position ();
396 s = min (piece->content->length_after_trim(), s);
397 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Convert a ContentTime within @param piece to a DCP time, clamped at zero. */
401 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
403 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by all caption content in the playlist.
   Rebuilds pieces first if they are stale. */
406 list<shared_ptr<Font> >
407 Player::get_subtitle_fonts ()
409 if (!_have_valid_pieces) {
413 list<shared_ptr<Font> > fonts;
414 BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
415 if (p->content->caption) {
416 /* XXX: things may go wrong if there are duplicate font IDs
417 with different font files.
419 list<shared_ptr<Font> > f = p->content->caption->fonts ();
420 copy (f.begin(), f.end(), back_inserter (fonts));
427 /** Set this player never to produce any video data */
429 Player::set_ignore_video ()
431 _ignore_video = true;
/** Set this player never to produce any subtitle/caption data */
435 Player::set_ignore_subtitle ()
437 _ignore_subtitle = true;
440 /** Set a type of caption that this player should always burn into the image,
441 * regardless of the content settings.
442 * @param type type of captions to burn.
445 Player::set_always_burn_captions (CaptionType type)
447 _always_burn_captions = type;
450 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the set_fast() signature and the _fast = true line are elided
   in this view; invalidating pieces forces decoders to be rebuilt in fast mode. */
455 _have_valid_pieces = false;
/** Tell the player to decode and play content that a DCP refers to
    (rather than leaving it for the referenced DCP itself); pieces are
    invalidated so decoders pick up the new mode. */
459 Player::set_play_referenced ()
461 _play_referenced = true;
462 _have_valid_pieces = false;
/* Find the reel assets (picture/sound/subtitle) of DCP content in the playlist
   that are marked as "referenced" (to be re-used directly rather than
   re-encoded), with entry point and duration adjusted for the content's trims,
   and each paired with the DCP time period it occupies in the film. */
465 list<ReferencedReelAsset>
466 Player::get_reel_assets ()
468 list<ReferencedReelAsset> a;
470 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can be referenced */
471 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
476 scoped_ptr<DCPDecoder> decoder;
/* fast=false: we only need reel metadata, not fast decode */
478 decoder.reset (new DCPDecoder (j, _film->log(), false));
484 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
486 DCPOMATIC_ASSERT (j->video_frame_rate ());
487 double const cfr = j->video_frame_rate().get();
488 Frame const trim_start = j->trim_start().frames_round (cfr);
489 Frame const trim_end = j->trim_end().frames_round (cfr);
490 int const ffr = _film->video_frame_rate ();
/* `offset' accumulates reel lengths so each reel is placed after the previous */
492 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
493 if (j->reference_video ()) {
494 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
495 DCPOMATIC_ASSERT (ra);
/* Push the entry point forward and shorten the duration to honour the trims */
496 ra->set_entry_point (ra->entry_point() + trim_start);
497 ra->set_duration (ra->duration() - trim_start - trim_end);
499 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
503 if (j->reference_audio ()) {
504 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
505 DCPOMATIC_ASSERT (ra);
506 ra->set_entry_point (ra->entry_point() + trim_start);
507 ra->set_duration (ra->duration() - trim_start - trim_end);
509 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
513 if (j->reference_subtitle ()) {
514 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
515 DCPOMATIC_ASSERT (ra);
516 ra->set_entry_point (ra->entry_point() + trim_start);
517 ra->set_duration (ra->duration() - trim_start - trim_end);
519 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
523 /* Assume that main picture duration is the length of the reel */
524 offset += k->main_picture()->duration ();
/* NOTE(review): this is the body of Player::pass(); its signature line is
   elided in this view.  One call: make whichever of (decoders, black filler,
   silence filler) is furthest behind emit some data, then flush any audio
   that is known to be complete and any delayed video that is now due. */
534 if (!_have_valid_pieces) {
538 if (_playlist->length() == DCPTime()) {
539 /* Special case of an empty Film; just give one black frame */
540 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
544 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
546 shared_ptr<Piece> earliest_content;
547 optional<DCPTime> earliest_time;
549 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* A decoder's position clamped at trim_start, expressed in DCP time */
554 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
555 if (t > i->content->end()) {
559 /* Given two choices at the same time, pick the one with a subtitle so we see it before
562 if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->caption)) {
564 earliest_content = i;
578 if (earliest_content) {
/* The black/silence fillers may be further behind than any decoder */
582 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
583 earliest_time = _black.position ();
587 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
588 earliest_time = _silent.position ();
594 earliest_content->done = earliest_content->decoder->pass ();
597 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
598 _black.set_position (_black.position() + one_video_frame());
602 DCPTimePeriod period (_silent.period_at_position());
603 if (_last_audio_time) {
604 /* Sometimes the thing that happened last finishes fractionally before
605 this silence. Bodge the start time of the silence to fix it. I'm
606 not sure if this is the right solution --- maybe the last thing should
607 be padded `forward' rather than this thing padding `back'.
609 period.from = min(period.from, *_last_audio_time);
/* Emit at most one video frame's worth of silence per pass */
611 if (period.duration() > one_video_frame()) {
612 period.to = period.from + one_video_frame();
615 _silent.set_position (period.to);
623 /* Emit any audio that is ready */
625 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
626 of our streams, or the position of the _silent.
628 DCPTime pull_to = _film->length ();
629 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
630 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
631 pull_to = i->second.last_push_end;
634 if (!_silent.done() && _silent.position() < pull_to) {
635 pull_to = _silent.position();
638 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
639 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
640 if (_last_audio_time && i->second < *_last_audio_time) {
641 /* This new data comes before the last we emitted (or the last seek); discard it */
642 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
647 } else if (_last_audio_time && i->second > *_last_audio_time) {
648 /* There's a gap between this data and the last we emitted; fill with silence */
649 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
652 emit_audio (i->first, i->second);
/* Flush the video delay queue when we reach the end */
657 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
658 do_emit_video(i->first, i->second);
/* Gather the captions (bitmap and rendered text) that should be burnt into
   the video frame at @param time, merged into a single PositionImage; returns
   none if there is nothing to burn. */
665 optional<PositionImage>
666 Player::captions_for_frame (DCPTime time) const
668 list<PositionImage> captions;
670 int const vfr = _film->video_frame_rate();
672 for (int i = 0; i < CAPTION_COUNT; ++i) {
/* `always' forces burn-in for this caption type regardless of content settings */
673 bool const always = _always_burn_captions && *_always_burn_captions == i;
676 _active_captions[i].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), always)
679 /* Image subtitles */
680 list<PositionImage> c = transform_bitmap_captions (j.image);
681 copy (c.begin(), c.end(), back_inserter (captions));
683 /* Text subtitles (rendered to an image) */
684 if (!j.text.empty ()) {
685 list<PositionImage> s = render_text (j.text, j.fonts, _video_container_size, time, vfr);
686 copy (s.begin(), s.end(), back_inserter (captions));
691 if (captions.empty ()) {
692 return optional<PositionImage> ();
695 return merge (captions);
/* Handler for video data arriving from a piece's decoder.
   @param wp the piece (weak; ignored if it has been destroyed)
   @param video the decoded frame.
   Discards out-of-range frames, fills any gap since the last emitted frame
   (repeating the previous frame or emitting black), then emits this frame,
   repeated as the frame-rate change requires. */
699 Player::video (weak_ptr<Piece> wp, ContentVideo video)
701 shared_ptr<Piece> piece = wp.lock ();
/* When skipping (content rate ~2x DCP rate) drop every other frame */
706 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
707 if (frc.skip && (video.frame % 2) == 1) {
711 /* Time of the first frame we will emit */
712 DCPTime const time = content_video_to_dcp (piece, video.frame);
714 /* Discard if it's before the content's period or the last accurate seek. We can't discard
715 if it's after the content's period here as in that case we still need to fill any gap between
716 `now' and the end of the content's period.
718 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
722 /* Fill gaps that we discover now that we have some video which needs to be emitted.
723 This is where we need to fill to.
725 DCPTime fill_to = min (time, piece->content->end());
727 if (_last_video_time) {
728 DCPTime fill_from = max (*_last_video_time, piece->content->position());
729 LastVideoMap::const_iterator last = _last_video.find (wp);
730 if (_film->three_d()) {
/* In 3D we must fill eye-by-eye to keep L/R alternation intact */
731 DCPTime j = fill_from;
732 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
733 if (eyes == EYES_BOTH) {
736 while (j < fill_to || eyes != video.eyes) {
737 if (last != _last_video.end()) {
/* Re-emit a copy of the last frame with the required eye */
738 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
739 copy->set_eyes (eyes);
740 emit_video (copy, j);
742 emit_video (black_player_video_frame(eyes), j);
/* Time only advances after the RIGHT eye of a L/R pair */
744 if (eyes == EYES_RIGHT) {
745 j += one_video_frame();
747 eyes = increment_eyes (eyes);
750 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
751 if (last != _last_video.end()) {
752 emit_video (last->second, j);
754 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this frame, with crop/fade/scale from the content */
760 _last_video[wp].reset (
763 piece->content->video->crop (),
764 piece->content->video->fade (video.frame),
765 piece->content->video->scale().size (
766 piece->content->video, _video_container_size, _film->frame_size ()
768 _video_container_size,
771 piece->content->video->colour_conversion(),
/* Emit the frame, repeated as required by the frame-rate change */
778 for (int i = 0; i < frc.repeat; ++i) {
779 if (t < piece->content->end()) {
780 emit_video (_last_video[wp], t);
782 t += one_video_frame ();
/* Handler for audio data arriving from a piece's decoder.
   @param wp the piece (weak; ignored if destroyed)
   @param stream the source stream
   @param content_audio the decoded samples and their frame position.
   Trims samples outside the content's period, applies gain, remapping and
   any audio processor, then pushes the result into the merger. */
787 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
789 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
791 shared_ptr<Piece> piece = wp.lock ();
796 shared_ptr<AudioContent> content = piece->content->audio;
797 DCPOMATIC_ASSERT (content);
799 /* Compute time in the DCP */
800 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
801 /* And the end of this block in the DCP */
802 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
804 /* Remove anything that comes before the start or after the end of the content */
805 if (time < piece->content->position()) {
806 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
808 /* This audio is entirely discarded */
811 content_audio.audio = cut.first;
813 } else if (time > piece->content->end()) {
816 } else if (end > piece->content->end()) {
/* Block straddles the end of the content: keep only the part inside */
817 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
818 if (remaining_frames == 0) {
821 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
822 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
823 content_audio.audio = cut;
826 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting (dB), if non-zero */
830 if (content->gain() != 0) {
831 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
832 gain->apply_gain (content->gain ());
833 content_audio.audio = gain;
/* Remap stream channels to the film's channel layout */
838 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
842 if (_audio_processor) {
843 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push to the merger and record how far this stream has got */
848 _audio_merger.push (content_audio.audio, time);
849 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
850 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for the start of a bitmap caption from a decoder: apply the
   content's offset/scale settings to the caption rectangle and register it
   with the active-captions set for its type. */
854 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<CaptionContent> wc, ContentBitmapCaption subtitle)
856 shared_ptr<Piece> piece = wp.lock ();
857 shared_ptr<CaptionContent> caption = wc.lock ();
858 if (!piece || !caption) {
862 /* Apply content's subtitle offsets */
863 subtitle.sub.rectangle.x += caption->x_offset ();
864 subtitle.sub.rectangle.y += caption->y_offset ();
866 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
867 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((caption->x_scale() - 1) / 2);
868 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((caption->y_scale() - 1) / 2);
870 /* Apply content's subtitle scale */
871 subtitle.sub.rectangle.width *= caption->x_scale ();
872 subtitle.sub.rectangle.height *= caption->y_scale ();
875 ps.image.push_back (subtitle.sub);
876 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
878 _active_captions[subtitle.type()].add_from (wc, ps, from);
/* Handler for the start of a plain-text caption from a decoder: apply the
   content's offsets, express the x/y scale via the font size and aspect
   adjust, then register with the active-captions set for its type. */
882 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<CaptionContent> wc, ContentTextCaption subtitle)
884 shared_ptr<Piece> piece = wp.lock ();
885 shared_ptr<CaptionContent> caption = wc.lock ();
886 if (!piece || !caption) {
891 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Ignore captions that start after the content's period */
893 if (from > piece->content->end()) {
897 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
898 s.set_h_position (s.h_position() + caption->x_offset ());
899 s.set_v_position (s.v_position() + caption->y_offset ());
900 float const xs = caption->x_scale();
901 float const ys = caption->y_scale();
902 float size = s.size();
904 /* Adjust size to express the common part of the scaling;
905 e.g. if xs = ys = 0.5 we scale size by 2.
907 if (xs > 1e-5 && ys > 1e-5) {
908 size *= 1 / min (1 / xs, 1 / ys);
912 /* Then express aspect ratio changes */
913 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
914 s.set_aspect_adjust (xs / ys);
/* dcp::Time with editable-unit rate 1000 (milliseconds) */
917 s.set_in (dcp::Time(from.seconds(), 1000));
918 ps.text.push_back (TextCaption (s, caption->outline_width()));
919 ps.add_fonts (caption->fonts ());
922 _active_captions[subtitle.type()].add_from (wc, ps, from);
/* Handler for the end of a caption: close it in the active set and, if it is
   not being burnt in, emit it via the Caption signal with its full period. */
926 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<CaptionContent> wc, ContentTime to, CaptionType type)
928 if (!_active_captions[type].have (wc)) {
932 shared_ptr<Piece> piece = wp.lock ();
933 shared_ptr<CaptionContent> caption = wc.lock ();
934 if (!piece || !caption) {
938 DCPTime const dcp_to = content_time_to_dcp (piece, to);
940 if (dcp_to > piece->content->end()) {
944 pair<PlayerCaption, DCPTime> from = _active_captions[type].add_to (wc, dcp_to);
/* Only emit as a separate caption if it is in use and not burnt into the image */
946 bool const always = _always_burn_captions && *_always_burn_captions == type;
947 if (caption->use() && !always && !caption->burn()) {
948 Caption (from.first, type, DCPTimePeriod (from.second, dcp_to));
/* Seek the player to @param time.
   @param accurate true to seek exactly to the requested time (decoders then
   discard data before it); false allows a nearby position (e.g. a keyframe).
   Flushes/clears audio state and active captions, seeks every piece, and
   resets the last-emitted time bookkeeping. */
953 Player::seek (DCPTime time, bool accurate)
955 if (!_have_valid_pieces) {
965 if (_audio_processor) {
966 _audio_processor->flush ();
969 _audio_merger.clear ();
970 for (int i = 0; i < CAPTION_COUNT; ++i) {
971 _active_captions[i].clear ();
974 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
975 if (time < i->content->position()) {
976 /* Before; seek to the start of the content */
977 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
979 } else if (i->content->position() <= time && time < i->content->end()) {
980 /* During; seek to position */
981 i->decoder->seek (dcp_to_content_time (i, time), accurate);
984 /* After; this piece is done */
/* For an accurate seek we know exactly what has been emitted up to `time';
   otherwise the next emitted data will establish these. */
990 _last_video_time = time;
991 _last_video_eyes = EYES_LEFT;
992 _last_audio_time = time;
994 _last_video_time = optional<DCPTime>();
995 _last_video_eyes = optional<Eyes>();
996 _last_audio_time = optional<DCPTime>();
999 _black.set_position (time);
1000 _silent.set_position (time);
1002 _last_video.clear ();
/* Queue a video frame for emission at @param time.  Frames sit in a short
   delay queue so that captions for the same time can arrive first; once the
   queue holds 3 entries the oldest is actually emitted via do_emit_video. */
1006 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1008 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1009 player before the video that requires them.
1011 _delay.push_back (make_pair (pv, time));
/* Advance our idea of time once both eyes (or a 2D frame) have been queued */
1013 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1014 _last_video_time = time + one_video_frame();
1016 _last_video_eyes = increment_eyes (pv->eyes());
1018 if (_delay.size() < 3) {
1022 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1024 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: expire captions that have finished, attach
   any captions to be burnt into this frame, then signal the frame out. */
1028 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1030 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1031 for (int i = 0; i < CAPTION_COUNT; ++i) {
1032 _active_captions[i].clear_before (time);
1036 optional<PositionImage> captions = captions_for_frame (time);
1038 pv->set_caption (captions.get ());
/* Emit a block of audio at @param time.  Audio must be contiguous: each block
   must start exactly where the previous one ended. */
1045 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1047 /* Log if the assert below is about to fail */
1048 if (_last_audio_time && time != *_last_audio_time) {
1049 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1052 /* This audio must follow on from the previous */
1053 DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1055 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence covering @param period, in blocks of at most 0.5 seconds. */
1059 Player::fill_audio (DCPTimePeriod period)
1061 if (period.from == period.to) {
1065 DCPOMATIC_ASSERT (period.from < period.to);
1067 DCPTime t = period.from;
1068 while (t < period.to) {
1069 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1070 Frame const samples = block.frames_round(_film->audio_frame_rate());
1072 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1073 silence->make_silent ();
1074 emit_audio (silence, t);
/* The duration of one video frame at the film's video frame rate. */
1081 Player::one_video_frame () const
1083 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Discard the part of @param audio (starting at @param time) that lies before
   @param discard_to.  Returns the remaining samples and their new start time;
   returns a null buffer if nothing remains. */
1086 pair<shared_ptr<AudioBuffers>, DCPTime>
1087 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1089 DCPTime const discard_time = discard_to - time;
1090 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1091 Frame remaining_frames = audio->frames() - discard_frames;
1092 if (remaining_frames <= 0) {
1093 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1095 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1096 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1097 return make_pair(cut, time + discard_time);
/* Set the JPEG2000 decode resolution reduction for DCP content (none for
   full resolution).  Invalidates pieces and notifies observers if changed. */
1101 Player::set_dcp_decode_reduction (optional<int> reduction)
1103 if (reduction == _dcp_decode_reduction) {
1107 _dcp_decode_reduction = reduction;
1108 _have_valid_pieces = false;
1109 Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Convert ContentTime @param t within @param content to DCP time, by finding
   the piece for that content.  Asserts if the content is not in the playlist
   (or pieces are stale in a way the guard below does not handle). */
1113 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1115 if (_have_valid_pieces) {
1119 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1120 if (i->content == content) {
1121 return content_time_to_dcp (i, t);
1125 /* We couldn't find the content; this shouldn't happen */
1125 DCPOMATIC_ASSERT (false);