2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "caption_content.h"
44 #include "caption_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <boost/foreach.hpp>
// Convenience macro: write a general-type entry to the film's log.
62 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
// Identifiers passed with the Changed signal so listeners can tell which
// aspect of the player's output has been invalidated.
80 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
81 int const PlayerProperty::PLAYLIST = 701;
82 int const PlayerProperty::FILM_CONTAINER = 702;
83 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
84 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
// Construct a Player which produces video/audio/caption output for `film'
// from the content in `playlist'.  NOTE(review): this listing has dropped
// some original lines here (e.g. the _film initialiser) — original line
// numbers are non-contiguous.
86 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
88 , _playlist (playlist)
89 , _have_valid_pieces (false)
90 , _ignore_video (false)
91 , _ignore_subtitle (false)
93 , _play_referenced (false)
94 , _audio_merger (_film->audio_frame_rate())
// React to changes in the film, the playlist, and individual content.
97 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
98 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
99 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
100 set_video_container_size (_film->frame_size ());
// Pick up the film's audio processor (if any), then start at time zero
// with an accurate seek.
102 film_changed (Film::AUDIO_PROCESSOR);
104 seek (DCPTime (), true);
// (Re-)build _pieces: one Piece per playlist content item, pairing the
// content with a decoder and a FrameRateChange, and wire each decoder's
// output signals back into this Player.
113 Player::setup_pieces ()
// The Shuffler re-orders 3D left/right frames that arrive out of sequence.
118 _shuffler = new Shuffler();
119 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
121 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
// Skip content whose files are missing or invalid.
123 if (!i->paths_valid ()) {
127 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
128 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
131 /* Not something that we can decode; e.g. Atmos content */
// Honour the ignore flags set by set_ignore_video()/set_ignore_subtitle().
135 if (decoder->video && _ignore_video) {
136 decoder->video->set_ignore (true);
139 if (decoder->caption && _ignore_subtitle) {
140 decoder->caption->set_ignore (true);
// DCP content may be played `referenced' (passed through rather than
// re-encoded); forward those settings to its decoder.
143 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
145 dcp->set_decode_referenced (_play_referenced);
146 if (_play_referenced) {
147 dcp->set_forced_reduction (_dcp_decode_reduction);
151 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
152 _pieces.push_back (piece);
// Route video either via the Shuffler (3D L/R content) or straight to
// Player::video; audio and captions go straight to their handlers.
154 if (decoder->video) {
155 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
156 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
157 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
159 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
163 if (decoder->audio) {
164 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
167 if (decoder->caption) {
168 decoder->caption->BitmapStart.connect (bind (&Player::bitmap_text_start, this, weak_ptr<Piece> (piece), _1));
169 decoder->caption->PlainStart.connect (bind (&Player::plain_text_start, this, weak_ptr<Piece> (piece), _1));
170 decoder->caption->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1, _2));
// Record, per audio stream, its owning piece and how far its audio has
// been pushed (initially the content's start position).
174 _stream_states.clear ();
175 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
176 if (i->content->audio) {
177 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
178 _stream_states[j] = StreamState (i, i->content->position ());
// Trackers for periods covered by no video / no audio content; the
// player fills these with black frames and silence respectively.
183 _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
184 _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
186 _last_video_time = DCPTime ();
187 _last_video_eyes = EYES_BOTH;
188 _last_audio_time = DCPTime ();
189 _have_valid_pieces = true;
// Handle a change to one piece of content in the playlist.
// @param w content that changed (may have expired).
// @param property which property changed (ContentProperty/Video/Audio/...).
// @param frequent true if this change is one of a rapid stream (e.g. a drag).
193 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
195 shared_ptr<Content> c = w.lock ();
// These properties invalidate the Piece list itself (positions, lengths,
// streams, decoder configuration), so the pieces must be rebuilt.
201 property == ContentProperty::POSITION ||
202 property == ContentProperty::LENGTH ||
203 property == ContentProperty::TRIM_START ||
204 property == ContentProperty::TRIM_END ||
205 property == ContentProperty::PATH ||
206 property == VideoContentProperty::FRAME_TYPE ||
207 property == VideoContentProperty::COLOUR_CONVERSION ||
208 property == AudioContentProperty::STREAMS ||
209 property == DCPContentProperty::NEEDS_ASSETS ||
210 property == DCPContentProperty::NEEDS_KDM ||
211 property == CaptionContentProperty::COLOUR ||
212 property == CaptionContentProperty::EFFECT ||
213 property == CaptionContentProperty::EFFECT_COLOUR ||
214 property == FFmpegContentProperty::SUBTITLE_STREAM ||
215 property == FFmpegContentProperty::FILTERS
218 _have_valid_pieces = false;
219 Changed (property, frequent);
// These properties only alter how existing pieces are rendered, so we
// just tell listeners that our output would now be different.
222 property == CaptionContentProperty::LINE_SPACING ||
223 property == CaptionContentProperty::OUTLINE_WIDTH ||
224 property == CaptionContentProperty::Y_SCALE ||
225 property == CaptionContentProperty::FADE_IN ||
226 property == CaptionContentProperty::FADE_OUT ||
227 property == ContentProperty::VIDEO_FRAME_RATE ||
228 property == CaptionContentProperty::USE ||
229 property == CaptionContentProperty::X_OFFSET ||
230 property == CaptionContentProperty::Y_OFFSET ||
231 property == CaptionContentProperty::X_SCALE ||
232 property == CaptionContentProperty::FONTS ||
233 property == CaptionContentProperty::TYPE ||
234 property == VideoContentProperty::CROP ||
235 property == VideoContentProperty::SCALE ||
236 property == VideoContentProperty::FADE_IN ||
237 property == VideoContentProperty::FADE_OUT
240 Changed (property, frequent);
// Set the size of the container into which all video will be rendered.
// No-op if the size is unchanged; otherwise a new black frame of the new
// size is prepared and listeners are notified.
245 Player::set_video_container_size (dcp::Size s)
247 if (s == _video_container_size) {
251 _video_container_size = s;
// Pre-build the black frame used to fill video-less periods.
253 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
254 _black_image->make_black ();
256 Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
// Called when the playlist's content list changes: all pieces are stale.
260 Player::playlist_changed ()
262 _have_valid_pieces = false;
263 Changed (PlayerProperty::PLAYLIST, false);
// React to a change in a Film property that affects our output.
// @param p the Film property that changed.
267 Player::film_changed (Film::Property p)
269 /* Here we should notice Film properties that affect our output, and
270 alert listeners that our output now would be different to how it was
271 last time we were run.
274 if (p == Film::CONTAINER) {
275 Changed (PlayerProperty::FILM_CONTAINER, false);
276 } else if (p == Film::VIDEO_FRAME_RATE) {
277 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
278 so we need new pieces here.
280 _have_valid_pieces = false;
281 Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
282 } else if (p == Film::AUDIO_PROCESSOR) {
// Clone the processor at the film's audio rate so we can run it here.
283 if (_film->audio_processor ()) {
284 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
286 } else if (p == Film::AUDIO_CHANNELS) {
287 _audio_merger.clear ();
// Convert bitmap captions (whose rectangles are expressed as fractions of
// the frame) into PositionImages scaled and positioned for
// _video_container_size, ready for burning into the video.
292 Player::transform_bitmap_captions (list<BitmapCaption> subs) const
294 list<PositionImage> all;
296 for (list<BitmapCaption>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
301 /* We will scale the subtitle up to fit _video_container_size */
302 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
308 dcp::YUV_TO_RGB_REC601,
309 i->image->pixel_format (),
// Fractional rectangle origin -> integer pixel position in the container.
314 lrint (_video_container_size.width * i->rectangle.x),
315 lrint (_video_container_size.height * i->rectangle.y)
// Make a PlayerVideo wrapping the pre-built black image, used to fill
// periods where no content provides video.
// @param eyes which eye(s) this black frame is for (EYES_BOTH for 2D).
324 shared_ptr<PlayerVideo>
325 Player::black_player_video_frame (Eyes eyes) const
327 return shared_ptr<PlayerVideo> (
329 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
332 _video_container_size,
333 _video_container_size,
336 PresetColourConversion::all().front().conversion,
// No originating content or frame for a synthesised black frame.
337 boost::weak_ptr<Content>(),
338 boost::optional<Frame>()
// Convert a DCP time to a video frame index within `piece', clamping to
// the piece's trimmed extent and accounting for skip/repeat via frc.
344 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
346 DCPTime s = t - piece->content->position ();
347 s = min (piece->content->length_after_trim(), s);
348 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
350 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
351 then convert that ContentTime to frames at the content's rate. However this fails for
352 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
353 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
355 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
357 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
// Inverse of dcp_to_content_video: content frame index -> DCP time.
361 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
363 /* See comment in dcp_to_content_video */
364 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
365 return d + piece->content->position();
// DCP time -> frame index in the piece's resampled audio.
369 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
371 DCPTime s = t - piece->content->position ();
372 s = min (piece->content->length_after_trim(), s);
373 /* See notes in dcp_to_content_video */
374 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
// Inverse of dcp_to_resampled_audio: resampled audio frame -> DCP time.
378 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
380 /* See comment in dcp_to_content_video */
381 return DCPTime::from_frames (f, _film->audio_frame_rate())
382 - DCPTime (piece->content->trim_start(), piece->frc)
383 + piece->content->position();
// DCP time -> ContentTime within the piece (clamped, trim applied).
387 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
389 DCPTime s = t - piece->content->position ();
390 s = min (piece->content->length_after_trim(), s);
391 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
// ContentTime within the piece -> DCP time (clamped at DCPTime zero).
395 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
397 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
// Collect the fonts used by all caption content in the playlist,
// rebuilding the pieces first if they are stale.
400 list<shared_ptr<Font> >
401 Player::get_subtitle_fonts ()
403 if (!_have_valid_pieces) {
407 list<shared_ptr<Font> > fonts;
408 BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
409 if (p->content->caption) {
410 /* XXX: things may go wrong if there are duplicate font IDs
411 with different font files.
413 list<shared_ptr<Font> > f = p->content->caption->fonts ();
414 copy (f.begin(), f.end(), back_inserter (fonts));
421 /** Set this player never to produce any video data */
423 Player::set_ignore_video ()
425 _ignore_video = true;
// Likewise, never produce any subtitle/caption data.
429 Player::set_ignore_subtitle ()
431 _ignore_subtitle = true;
434 /** Set a type of caption that this player should always burn into the image,
435 * regardless of the content settings.
436 * @param type type of captions to burn.
439 Player::set_always_burn_captions (CaptionType type)
441 _always_burn_captions = type;
444 /** Sets up the player to be faster, possibly at the expense of quality */
// (The flag set here is read when decoders are created in setup_pieces,
// hence the pieces are invalidated.)
449 _have_valid_pieces = false;
// Play referenced DCP content directly rather than re-decoding it.
453 Player::set_play_referenced ()
455 _play_referenced = true;
456 _have_valid_pieces = false;
// Collect the reel assets (picture/sound/subtitle) of any DCP content that
// is marked to be `referenced' — i.e. copied into the output DCP rather
// than re-encoded — with entry points and durations adjusted for trims.
459 list<ReferencedReelAsset>
460 Player::get_reel_assets ()
462 list<ReferencedReelAsset> a;
464 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
465 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
// A throwaway decoder is made just to enumerate the DCP's reels.
470 scoped_ptr<DCPDecoder> decoder;
472 decoder.reset (new DCPDecoder (j, _film->log(), false));
478 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
// Trims are expressed in content frames (cfr); output times in film
// frames (ffr).
480 DCPOMATIC_ASSERT (j->video_frame_rate ());
481 double const cfr = j->video_frame_rate().get();
482 Frame const trim_start = j->trim_start().frames_round (cfr);
483 Frame const trim_end = j->trim_end().frames_round (cfr);
484 int const ffr = _film->video_frame_rate ();
486 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
487 if (j->reference_video ()) {
488 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
489 DCPOMATIC_ASSERT (ra);
// Push the entry point past the start trim and shorten by both trims.
490 ra->set_entry_point (ra->entry_point() + trim_start);
491 ra->set_duration (ra->duration() - trim_start - trim_end);
493 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
497 if (j->reference_audio ()) {
498 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
499 DCPOMATIC_ASSERT (ra);
500 ra->set_entry_point (ra->entry_point() + trim_start);
501 ra->set_duration (ra->duration() - trim_start - trim_end);
503 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
507 if (j->reference_subtitle ()) {
508 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
509 DCPOMATIC_ASSERT (ra);
510 ra->set_entry_point (ra->entry_point() + trim_start);
511 ra->set_duration (ra->duration() - trim_start - trim_end);
513 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
517 /* Assume that main picture duration is the length of the reel */
518 offset += k->main_picture()->duration ();
// Body of Player::pass(): make one step of progress by asking whichever of
// the decoders / black-filler / silence-filler is furthest behind to emit
// some data, then flush any audio and delayed video that is now ready.
// NOTE(review): the function's signature line is missing from this listing.
528 if (!_have_valid_pieces) {
532 if (_playlist->length() == DCPTime()) {
533 /* Special case of an empty Film; just give one black frame */
534 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
538 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
540 shared_ptr<Piece> earliest_content;
541 optional<DCPTime> earliest_time;
543 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
// Current position of this piece's decoder, expressed in DCP time.
548 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
549 if (t > i->content->end()) {
553 /* Given two choices at the same time, pick the one with a subtitle so we see it before
556 if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->caption)) {
558 earliest_content = i;
// The black and silent fillers compete with the content decoders: the
// earliest of the three is the one that gets to emit.
572 if (earliest_content) {
576 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
577 earliest_time = _black.position ();
581 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
582 earliest_time = _silent.position ();
588 earliest_content->done = earliest_content->decoder->pass ();
// Fill a video-less gap with one black frame...
591 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
592 _black.set_position (_black.position() + one_video_frame());
// ...or an audio-less gap with up to one video frame's worth of silence.
596 DCPTimePeriod period (_silent.period_at_position());
597 if (_last_audio_time) {
598 /* Sometimes the thing that happened last finishes fractionally before
599 this silence. Bodge the start time of the silence to fix it. I'm
600 not sure if this is the right solution --- maybe the last thing should
601 be padded `forward' rather than this thing padding `back'.
603 period.from = min(period.from, *_last_audio_time);
605 if (period.duration() > one_video_frame()) {
606 period.to = period.from + one_video_frame();
609 _silent.set_position (period.to);
617 /* Emit any audio that is ready */
619 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
620 of our streams, or the position of the _silent.
622 DCPTime pull_to = _film->length ();
623 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
624 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
625 pull_to = i->second.last_push_end;
628 if (!_silent.done() && _silent.position() < pull_to) {
629 pull_to = _silent.position();
632 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
633 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
634 if (_last_audio_time && i->second < *_last_audio_time) {
635 /* This new data comes before the last we emitted (or the last seek); discard it */
636 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
641 } else if (_last_audio_time && i->second > *_last_audio_time) {
642 /* There's a gap between this data and the last we emitted; fill with silence */
643 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
646 emit_audio (i->first, i->second);
// Flush the video delay queue (see emit_video for why it exists).
651 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
652 do_emit_video(i->first, i->second);
// Gather all captions (bitmap and rendered text) that should be burnt into
// the video frame at `time', merged into one image; empty optional if none.
659 optional<PositionImage>
660 Player::captions_for_frame (DCPTime time) const
662 list<PositionImage> captions;
664 int const vfr = _film->video_frame_rate();
666 for (int i = 0; i < CAPTION_COUNT; ++i) {
// Burn this caption type if set_always_burn_captions selected it.
667 bool const always = _always_burn_captions && *_always_burn_captions == i;
670 _active_captions[i].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), always)
673 /* Image subtitles */
674 list<PositionImage> c = transform_bitmap_captions (j.image);
675 copy (c.begin(), c.end(), back_inserter (captions));
677 /* Text subtitles (rendered to an image) */
678 if (!j.text.empty ()) {
679 list<PositionImage> s = render_text (j.text, j.fonts, _video_container_size, time, vfr);
680 copy (s.begin(), s.end(), back_inserter (captions));
685 if (captions.empty ()) {
686 return optional<PositionImage> ();
689 return merge (captions);
// Receive one decoded video frame from a piece's decoder: discard frames
// outside the valid range, fill any gap since the last emitted frame
// (repeating the previous frame or emitting black), then emit this frame
// (repeated as required by the frame-rate change).
// @param wp the piece the frame came from (may have expired).
// @param video the decoded frame and its content frame index/eyes.
693 Player::video (weak_ptr<Piece> wp, ContentVideo video)
695 shared_ptr<Piece> piece = wp.lock ();
// When skipping (content rate ~2x DCP rate) drop every other frame.
700 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
701 if (frc.skip && (video.frame % 2) == 1) {
705 /* Time of the first frame we will emit */
706 DCPTime const time = content_video_to_dcp (piece, video.frame);
708 /* Discard if it's before the content's period or the last accurate seek. We can't discard
709 if it's after the content's period here as in that case we still need to fill any gap between
710 `now' and the end of the content's period.
712 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
716 /* Fill gaps that we discover now that we have some video which needs to be emitted.
717 This is where we need to fill to.
719 DCPTime fill_to = min (time, piece->content->end());
721 if (_last_video_time) {
722 DCPTime fill_from = max (*_last_video_time, piece->content->position());
// Last frame we emitted for this piece, if any: reuse it for filling.
723 LastVideoMap::const_iterator last = _last_video.find (wp);
724 if (_film->three_d()) {
// In 3D we fill eye-by-eye, advancing time only after the right eye.
725 DCPTime j = fill_from;
726 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
727 if (eyes == EYES_BOTH) {
730 while (j < fill_to || eyes != video.eyes) {
731 if (last != _last_video.end()) {
732 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
733 copy->set_eyes (eyes);
734 emit_video (copy, j);
736 emit_video (black_player_video_frame(eyes), j);
738 if (eyes == EYES_RIGHT) {
739 j += one_video_frame();
741 eyes = increment_eyes (eyes);
// 2D: fill one whole frame at a time.
744 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
745 if (last != _last_video.end()) {
746 emit_video (last->second, j);
748 emit_video (black_player_video_frame(EYES_BOTH), j);
// Build the PlayerVideo for this frame (crop/fade/scale per the content's
// video settings) and remember it as the piece's last frame.
754 _last_video[wp].reset (
757 piece->content->video->crop (),
758 piece->content->video->fade (video.frame),
759 piece->content->video->scale().size (
760 piece->content->video, _video_container_size, _film->frame_size ()
762 _video_container_size,
765 piece->content->video->colour_conversion(),
// Emit the frame `repeat' times (content rate lower than DCP rate),
// stopping at the content's end.
772 for (int i = 0; i < frc.repeat; ++i) {
773 if (t < piece->content->end()) {
774 emit_video (_last_video[wp], t);
776 t += one_video_frame ();
// Receive decoded audio from a piece's decoder: place it on the DCP
// timeline, trim anything outside the content's period, apply gain,
// remap channels, run the optional audio processor, then push it into
// the merger and advance the stream's push position.
// @param wp the piece the audio came from (may have expired).
// @param stream which of the content's audio streams this is.
// @param content_audio the decoded samples and their resampled frame index.
781 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
783 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
785 shared_ptr<Piece> piece = wp.lock ();
790 shared_ptr<AudioContent> content = piece->content->audio;
791 DCPOMATIC_ASSERT (content);
793 /* Compute time in the DCP */
794 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
795 /* And the end of this block in the DCP */
796 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
798 /* Remove anything that comes before the start or after the end of the content */
799 if (time < piece->content->position()) {
800 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
802 /* This audio is entirely discarded */
805 content_audio.audio = cut.first;
807 } else if (time > piece->content->end()) {
// Block starts within the content but overruns its end: truncate.
810 } else if (end > piece->content->end()) {
811 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
812 if (remaining_frames == 0) {
815 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
816 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
817 content_audio.audio = cut;
820 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
// Apply the content's gain setting (dB), if non-zero.
824 if (content->gain() != 0) {
825 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
826 gain->apply_gain (content->gain ());
827 content_audio.audio = gain;
// Remap the stream's channels into the film's channel layout.
832 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
836 if (_audio_processor) {
837 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
// Hand to the merger and record how far this stream has been pushed.
842 _audio_merger.push (content_audio.audio, time);
843 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
844 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
// Receive the start of a bitmap caption from a piece's decoder: apply the
// content's offset/scale settings to its fractional rectangle and record
// it as active from the corresponding DCP time.
848 Player::bitmap_text_start (weak_ptr<Piece> wp, ContentBitmapCaption subtitle)
850 shared_ptr<Piece> piece = wp.lock ();
855 /* Apply content's subtitle offsets */
856 subtitle.sub.rectangle.x += piece->content->caption->x_offset ();
857 subtitle.sub.rectangle.y += piece->content->caption->y_offset ();
859 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
860 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((piece->content->caption->x_scale() - 1) / 2);
861 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((piece->content->caption->y_scale() - 1) / 2);
863 /* Apply content's subtitle scale */
864 subtitle.sub.rectangle.width *= piece->content->caption->x_scale ();
865 subtitle.sub.rectangle.height *= piece->content->caption->y_scale ();
868 ps.image.push_back (subtitle.sub);
869 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
871 _active_captions[subtitle.type()].add_from (wp, ps, from);
// Receive the start of a plain-text caption: apply the content's position
// offsets and x/y scale to each SubtitleString, then record the caption as
// active from the corresponding DCP time.
875 Player::plain_text_start (weak_ptr<Piece> wp, ContentTextCaption subtitle)
877 shared_ptr<Piece> piece = wp.lock ();
883 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
// Ignore captions that start after the content has ended.
885 if (from > piece->content->end()) {
889 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
890 s.set_h_position (s.h_position() + piece->content->caption->x_offset ());
891 s.set_v_position (s.v_position() + piece->content->caption->y_offset ());
892 float const xs = piece->content->caption->x_scale();
893 float const ys = piece->content->caption->y_scale();
894 float size = s.size();
896 /* Adjust size to express the common part of the scaling;
897 e.g. if xs = ys = 0.5 we scale size by 2.
899 if (xs > 1e-5 && ys > 1e-5) {
900 size *= 1 / min (1 / xs, 1 / ys);
904 /* Then express aspect ratio changes */
905 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
906 s.set_aspect_adjust (xs / ys);
// dcp::Time is constructed with a tick rate of 1000 here.
909 s.set_in (dcp::Time(from.seconds(), 1000));
910 ps.text.push_back (TextCaption (s, piece->content->caption->outline_width()));
911 ps.add_fonts (piece->content->caption->fonts ());
914 _active_captions[subtitle.type()].add_from (wp, ps, from);
// Receive the end time of a caption previously started for this piece:
// close it off in _active_captions and, if it is not being burnt in,
// emit it via the Caption signal.
918 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to, CaptionType type)
920 if (!_active_captions[type].have (wp)) {
924 shared_ptr<Piece> piece = wp.lock ();
929 DCPTime const dcp_to = content_time_to_dcp (piece, to);
// Ignore stops beyond the content's end.
931 if (dcp_to > piece->content->end()) {
935 pair<PlayerCaption, DCPTime> from = _active_captions[type].add_to (wp, dcp_to);
// Burnt-in captions are handled in captions_for_frame, not emitted here.
937 bool const always = _always_burn_captions && *_always_burn_captions == type;
938 if (piece->content->caption->use() && !always && !piece->content->caption->burn()) {
939 Caption (from.first, type, DCPTimePeriod (from.second, dcp_to));
// Seek all pieces to `time'.
// @param time DCP time to seek to.
// @param accurate true to decode from the seek point exactly; an accurate
//        seek resets the last-emitted times so pre-seek data is discarded.
944 Player::seek (DCPTime time, bool accurate)
946 if (!_have_valid_pieces) {
// Drop any buffered state from before the seek.
956 if (_audio_processor) {
957 _audio_processor->flush ();
960 _audio_merger.clear ();
961 for (int i = 0; i < CAPTION_COUNT; ++i) {
962 _active_captions[i].clear ();
965 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
966 if (time < i->content->position()) {
967 /* Before; seek to the start of the content */
968 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
970 } else if (i->content->position() <= time && time < i->content->end()) {
971 /* During; seek to position */
972 i->decoder->seek (dcp_to_content_time (i, time), accurate);
975 /* After; this piece is done */
// Accurate seek: next emissions must start exactly at `time'.
981 _last_video_time = time;
982 _last_video_eyes = EYES_LEFT;
983 _last_audio_time = time;
// Inaccurate seek: accept whatever the decoders produce first.
985 _last_video_time = optional<DCPTime>();
986 _last_video_eyes = optional<Eyes>();
987 _last_audio_time = optional<DCPTime>();
990 _black.set_position (time);
991 _silent.set_position (time);
993 _last_video.clear ();
// Queue a video frame for emission, via a small delay buffer so that
// captions for a frame can arrive before the frame itself is emitted.
997 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
999 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1000 player before the video that requires them.
1002 _delay.push_back (make_pair (pv, time));
// Only advance the clock once a whole frame (both eyes in 3D) is queued.
1004 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1005 _last_video_time = time + one_video_frame();
1007 _last_video_eyes = increment_eyes (pv->eyes());
// Keep up to two frames buffered; emit the oldest beyond that.
1009 if (_delay.size() < 3) {
1013 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1015 do_emit_video (to_do.first, to_do.second);
// Actually emit a frame: expire old captions, burn in any current ones.
1019 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1021 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1022 for (int i = 0; i < CAPTION_COUNT; ++i) {
1023 _active_captions[i].clear_before (time);
1027 optional<PositionImage> captions = captions_for_frame (time);
1029 pv->set_caption (captions.get ());
// Emit a block of audio which must follow on exactly from the previous
// block; advances _last_audio_time by the block's length.
1036 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1038 /* Log if the assert below is about to fail */
1039 if (_last_audio_time && time != *_last_audio_time) {
1040 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1043 /* This audio must follow on from the previous */
1044 DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1046 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
// Fill `period' with silence, emitted in blocks of at most 0.5s.
1050 Player::fill_audio (DCPTimePeriod period)
1052 if (period.from == period.to) {
1056 DCPOMATIC_ASSERT (period.from < period.to);
1058 DCPTime t = period.from;
1059 while (t < period.to) {
1060 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1061 Frame const samples = block.frames_round(_film->audio_frame_rate());
1063 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1064 silence->make_silent ();
1065 emit_audio (silence, t);
// The duration of one video frame at the film's frame rate.
1072 Player::one_video_frame () const
1074 return DCPTime::from_frames (1, _film->video_frame_rate ());
// Remove the part of `audio' that falls before `discard_to'.
// @return the remaining audio and its (adjusted) start time; a null
//         buffer if everything was discarded.
1077 pair<shared_ptr<AudioBuffers>, DCPTime>
1078 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1080 DCPTime const discard_time = discard_to - time;
1081 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1082 Frame remaining_frames = audio->frames() - discard_frames;
1083 if (remaining_frames <= 0) {
1084 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1086 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1087 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1088 return make_pair(cut, time + discard_time);
// Set (or clear) a resolution reduction for DCP decoding; invalidates the
// pieces (decoders must be rebuilt) and notifies listeners.
1092 Player::set_dcp_decode_reduction (optional<int> reduction)
1094 if (reduction == _dcp_decode_reduction) {
1098 _dcp_decode_reduction = reduction;
1099 _have_valid_pieces = false;
1100 Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);
// Map a ContentTime in `content' to DCP time by finding its piece.
// Asserts if the content is not in the current piece list.
1104 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1106 if (_have_valid_pieces) {
1110 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1111 if (i->content == content) {
1112 return content_time_to_dcp (i, t);
1116 DCPOMATIC_ASSERT (false);