/*
    Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/
#include "player.h"
#include "film.h"
#include "audio_buffers.h"
#include "content_audio.h"
#include "dcp_content.h"
#include "job.h"
#include "image.h"
#include "raw_image_proxy.h"
#include "ratio.h"
#include "log.h"
#include "render_subtitles.h"
#include "config.h"
#include "content_video.h"
#include "player_video.h"
#include "frame_rate_change.h"
#include "audio_processor.h"
#include "playlist.h"
#include "referenced_reel_asset.h"
#include "decoder_factory.h"
#include "decoder.h"
#include "video_decoder.h"
#include "audio_decoder.h"
#include "subtitle_content.h"
#include "subtitle_decoder.h"
#include "ffmpeg_content.h"
#include "audio_content.h"
#include "content_subtitle.h"
#include "dcp_decoder.h"
#include "image_decoder.h"
#include "compose.hpp"
#include "shuffler.h"
#include <dcp/reel.h>
#include <dcp/reel_sound_asset.h>
#include <dcp/reel_subtitle_asset.h>
#include <dcp/reel_picture_asset.h>
#include <boost/foreach.hpp>
#include <stdint.h>
#include <algorithm>
#include <iostream>
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
81 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
83 , _playlist (playlist)
84 , _have_valid_pieces (false)
85 , _ignore_video (false)
86 , _ignore_subtitle (false)
87 , _always_burn_subtitles (false)
89 , _play_referenced (false)
90 , _audio_merger (_film->audio_frame_rate())
93 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
94 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
95 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
96 set_video_container_size (_film->frame_size ());
98 film_changed (Film::AUDIO_PROCESSOR);
100 seek (DCPTime (), true);
109 Player::setup_pieces ()
114 _shuffler = new Shuffler();
115 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
117 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
119 if (!i->paths_valid ()) {
123 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
124 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
127 /* Not something that we can decode; e.g. Atmos content */
131 if (decoder->video && _ignore_video) {
132 decoder->video->set_ignore (true);
135 if (decoder->subtitle && _ignore_subtitle) {
136 decoder->subtitle->set_ignore (true);
139 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
141 dcp->set_decode_referenced (_play_referenced);
142 if (_play_referenced) {
143 dcp->set_forced_reduction (_dcp_decode_reduction);
147 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
148 _pieces.push_back (piece);
150 if (decoder->video) {
151 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
152 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
153 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
155 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
159 if (decoder->audio) {
160 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
163 if (decoder->subtitle) {
164 decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
165 decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
166 decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
170 _stream_states.clear ();
171 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
172 if (i->content->audio) {
173 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
174 _stream_states[j] = StreamState (i, i->content->position ());
179 _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
180 _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
182 _last_video_time = DCPTime ();
183 _last_video_eyes = EYES_BOTH;
184 _last_audio_time = DCPTime ();
185 _have_valid_pieces = true;
189 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
191 shared_ptr<Content> c = w.lock ();
197 property == ContentProperty::POSITION ||
198 property == ContentProperty::LENGTH ||
199 property == ContentProperty::TRIM_START ||
200 property == ContentProperty::TRIM_END ||
201 property == ContentProperty::PATH ||
202 property == VideoContentProperty::FRAME_TYPE ||
203 property == DCPContentProperty::NEEDS_ASSETS ||
204 property == DCPContentProperty::NEEDS_KDM ||
205 property == SubtitleContentProperty::COLOUR ||
206 property == SubtitleContentProperty::EFFECT ||
207 property == SubtitleContentProperty::EFFECT_COLOUR ||
208 property == FFmpegContentProperty::SUBTITLE_STREAM ||
209 property == FFmpegContentProperty::FILTERS ||
210 property == VideoContentProperty::COLOUR_CONVERSION
213 _have_valid_pieces = false;
217 property == SubtitleContentProperty::LINE_SPACING ||
218 property == SubtitleContentProperty::OUTLINE_WIDTH ||
219 property == SubtitleContentProperty::Y_SCALE ||
220 property == SubtitleContentProperty::FADE_IN ||
221 property == SubtitleContentProperty::FADE_OUT ||
222 property == ContentProperty::VIDEO_FRAME_RATE ||
223 property == SubtitleContentProperty::USE ||
224 property == SubtitleContentProperty::X_OFFSET ||
225 property == SubtitleContentProperty::Y_OFFSET ||
226 property == SubtitleContentProperty::X_SCALE ||
227 property == SubtitleContentProperty::FONTS ||
228 property == VideoContentProperty::CROP ||
229 property == VideoContentProperty::SCALE ||
230 property == VideoContentProperty::FADE_IN ||
231 property == VideoContentProperty::FADE_OUT
239 Player::set_video_container_size (dcp::Size s)
241 if (s == _video_container_size) {
245 _video_container_size = s;
247 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
248 _black_image->make_black ();
254 Player::playlist_changed ()
256 _have_valid_pieces = false;
261 Player::film_changed (Film::Property p)
263 /* Here we should notice Film properties that affect our output, and
264 alert listeners that our output now would be different to how it was
265 last time we were run.
268 if (p == Film::CONTAINER) {
270 } else if (p == Film::VIDEO_FRAME_RATE) {
271 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
272 so we need new pieces here.
274 _have_valid_pieces = false;
276 } else if (p == Film::AUDIO_PROCESSOR) {
277 if (_film->audio_processor ()) {
278 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
284 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
286 list<PositionImage> all;
288 for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
293 /* We will scale the subtitle up to fit _video_container_size */
294 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
296 /* Then we need a corrective translation, consisting of two parts:
298 * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
299 * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
301 * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
302 * (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
303 * (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
305 * Combining these two translations gives these expressions.
312 dcp::YUV_TO_RGB_REC601,
313 i->image->pixel_format (),
318 lrint (_video_container_size.width * i->rectangle.x),
319 lrint (_video_container_size.height * i->rectangle.y)
328 shared_ptr<PlayerVideo>
329 Player::black_player_video_frame (Eyes eyes) const
331 return shared_ptr<PlayerVideo> (
333 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
336 _video_container_size,
337 _video_container_size,
340 PresetColourConversion::all().front().conversion
346 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
348 DCPTime s = t - piece->content->position ();
349 s = min (piece->content->length_after_trim(), s);
350 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
352 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
353 then convert that ContentTime to frames at the content's rate. However this fails for
354 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
355 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
357 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
359 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
363 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
365 /* See comment in dcp_to_content_video */
366 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
367 return d + piece->content->position();
371 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
373 DCPTime s = t - piece->content->position ();
374 s = min (piece->content->length_after_trim(), s);
375 /* See notes in dcp_to_content_video */
376 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
380 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
382 /* See comment in dcp_to_content_video */
383 return DCPTime::from_frames (f, _film->audio_frame_rate())
384 - DCPTime (piece->content->trim_start(), piece->frc)
385 + piece->content->position();
389 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
391 DCPTime s = t - piece->content->position ();
392 s = min (piece->content->length_after_trim(), s);
393 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
397 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
399 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
402 list<shared_ptr<Font> >
403 Player::get_subtitle_fonts ()
405 if (!_have_valid_pieces) {
409 list<shared_ptr<Font> > fonts;
410 BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
411 if (p->content->subtitle) {
412 /* XXX: things may go wrong if there are duplicate font IDs
413 with different font files.
415 list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
416 copy (f.begin(), f.end(), back_inserter (fonts));
423 /** Set this player never to produce any video data */
425 Player::set_ignore_video ()
427 _ignore_video = true;
431 Player::set_ignore_subtitle ()
433 _ignore_subtitle = true;
436 /** Set whether or not this player should always burn text subtitles into the image,
437 * regardless of the content settings.
438 * @param burn true to always burn subtitles, false to obey content settings.
441 Player::set_always_burn_subtitles (bool burn)
443 _always_burn_subtitles = burn;
446 /** Sets up the player to be faster, possibly at the expense of quality */
451 _have_valid_pieces = false;
455 Player::set_play_referenced ()
457 _play_referenced = true;
458 _have_valid_pieces = false;
461 list<ReferencedReelAsset>
462 Player::get_reel_assets ()
464 list<ReferencedReelAsset> a;
466 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
467 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
472 scoped_ptr<DCPDecoder> decoder;
474 decoder.reset (new DCPDecoder (j, _film->log(), false));
480 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
482 DCPOMATIC_ASSERT (j->video_frame_rate ());
483 double const cfr = j->video_frame_rate().get();
484 Frame const trim_start = j->trim_start().frames_round (cfr);
485 Frame const trim_end = j->trim_end().frames_round (cfr);
486 int const ffr = _film->video_frame_rate ();
488 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
489 if (j->reference_video ()) {
490 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
491 DCPOMATIC_ASSERT (ra);
492 ra->set_entry_point (ra->entry_point() + trim_start);
493 ra->set_duration (ra->duration() - trim_start - trim_end);
495 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
499 if (j->reference_audio ()) {
500 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
501 DCPOMATIC_ASSERT (ra);
502 ra->set_entry_point (ra->entry_point() + trim_start);
503 ra->set_duration (ra->duration() - trim_start - trim_end);
505 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
509 if (j->reference_subtitle ()) {
510 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
511 DCPOMATIC_ASSERT (ra);
512 ra->set_entry_point (ra->entry_point() + trim_start);
513 ra->set_duration (ra->duration() - trim_start - trim_end);
515 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
519 /* Assume that main picture duration is the length of the reel */
520 offset += k->main_picture()->duration ();
530 if (!_have_valid_pieces) {
534 if (_playlist->length() == DCPTime()) {
535 /* Special case of an empty Film; just give one black frame */
536 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
540 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
542 shared_ptr<Piece> earliest_content;
543 optional<DCPTime> earliest_time;
545 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
550 DCPTime const t = content_time_to_dcp (i, i->decoder->position());
551 if (t > i->content->end()) {
555 /* Given two choices at the same time, pick the one with a subtitle so we see it before
558 if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->subtitle)) {
560 earliest_content = i;
574 if (earliest_content) {
578 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
579 earliest_time = _black.position ();
583 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
584 earliest_time = _silent.position ();
590 earliest_content->done = earliest_content->decoder->pass ();
593 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
594 _black.set_position (_black.position() + one_video_frame());
598 DCPTimePeriod period (_silent.period_at_position());
599 if (_last_audio_time) {
600 /* Sometimes the thing that happened last finishes fractionally before
601 this silence. Bodge the start time of the silence to fix it. I'm
602 not sure if this is the right solution --- maybe the last thing should
603 be padded `forward' rather than this thing padding `back'.
605 period.from = min(period.from, *_last_audio_time);
607 if (period.duration() > one_video_frame()) {
608 period.to = period.from + one_video_frame();
611 _silent.set_position (period.to);
619 /* Emit any audio that is ready */
621 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
622 of our streams, or the position of the _silent.
624 DCPTime pull_to = _film->length ();
625 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
626 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
627 pull_to = i->second.last_push_end;
630 if (!_silent.done() && _silent.position() < pull_to) {
631 pull_to = _silent.position();
634 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
635 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
636 if (_last_audio_time && i->second < *_last_audio_time) {
637 /* This new data comes before the last we emitted (or the last seek); discard it */
638 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
643 } else if (_last_audio_time && i->second > *_last_audio_time) {
644 /* There's a gap between this data and the last we emitted; fill with silence */
645 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
648 emit_audio (i->first, i->second);
653 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
654 do_emit_video(i->first, i->second);
661 optional<PositionImage>
662 Player::subtitles_for_frame (DCPTime time) const
664 list<PositionImage> subtitles;
666 int const vfr = _film->video_frame_rate();
668 BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_subtitles)) {
670 /* Image subtitles */
671 list<PositionImage> c = transform_image_subtitles (i.image);
672 copy (c.begin(), c.end(), back_inserter (subtitles));
674 /* Text subtitles (rendered to an image) */
675 if (!i.text.empty ()) {
676 list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time, vfr);
677 copy (s.begin(), s.end(), back_inserter (subtitles));
681 if (subtitles.empty ()) {
682 return optional<PositionImage> ();
685 return merge (subtitles);
689 Player::video (weak_ptr<Piece> wp, ContentVideo video)
691 shared_ptr<Piece> piece = wp.lock ();
696 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
697 if (frc.skip && (video.frame % 2) == 1) {
701 /* Time of the first frame we will emit */
702 DCPTime const time = content_video_to_dcp (piece, video.frame);
704 /* Discard if it's before the content's period or the last accurate seek. We can't discard
705 if it's after the content's period here as in that case we still need to fill any gap between
706 `now' and the end of the content's period.
708 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
712 /* Fill gaps that we discover now that we have some video which needs to be emitted.
713 This is where we need to fill to.
715 DCPTime fill_to = min (time, piece->content->end());
717 if (_last_video_time) {
718 DCPTime fill_from = max (*_last_video_time, piece->content->position());
719 LastVideoMap::const_iterator last = _last_video.find (wp);
720 if (_film->three_d()) {
721 DCPTime j = fill_from;
722 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
723 if (eyes == EYES_BOTH) {
726 while (j < fill_to || eyes != video.eyes) {
727 if (last != _last_video.end()) {
728 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
729 copy->set_eyes (eyes);
730 emit_video (copy, j);
732 emit_video (black_player_video_frame(eyes), j);
734 if (eyes == EYES_RIGHT) {
735 j += one_video_frame();
737 eyes = increment_eyes (eyes);
740 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
741 if (last != _last_video.end()) {
742 emit_video (last->second, j);
744 emit_video (black_player_video_frame(EYES_BOTH), j);
750 _last_video[wp].reset (
753 piece->content->video->crop (),
754 piece->content->video->fade (video.frame),
755 piece->content->video->scale().size (
756 piece->content->video, _video_container_size, _film->frame_size ()
758 _video_container_size,
761 piece->content->video->colour_conversion ()
766 for (int i = 0; i < frc.repeat; ++i) {
767 if (t < piece->content->end()) {
768 emit_video (_last_video[wp], t);
770 t += one_video_frame ();
775 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
777 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
779 shared_ptr<Piece> piece = wp.lock ();
784 shared_ptr<AudioContent> content = piece->content->audio;
785 DCPOMATIC_ASSERT (content);
787 /* Compute time in the DCP */
788 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
789 /* And the end of this block in the DCP */
790 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
792 /* Remove anything that comes before the start or after the end of the content */
793 if (time < piece->content->position()) {
794 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
796 /* This audio is entirely discarded */
799 content_audio.audio = cut.first;
801 } else if (time > piece->content->end()) {
804 } else if (end > piece->content->end()) {
805 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
806 if (remaining_frames == 0) {
809 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
810 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
811 content_audio.audio = cut;
814 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
818 if (content->gain() != 0) {
819 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
820 gain->apply_gain (content->gain ());
821 content_audio.audio = gain;
826 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
830 if (_audio_processor) {
831 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
836 _audio_merger.push (content_audio.audio, time);
837 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
838 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
842 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
844 shared_ptr<Piece> piece = wp.lock ();
849 /* Apply content's subtitle offsets */
850 subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
851 subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
853 /* Apply content's subtitle scale */
854 subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
855 subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
857 /* Apply a corrective translation to keep the subtitle centred after that scale */
858 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
859 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
862 ps.image.push_back (subtitle.sub);
863 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
865 _active_subtitles.add_from (wp, ps, from);
869 Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
871 shared_ptr<Piece> piece = wp.lock ();
877 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
879 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
880 s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
881 s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
882 float const xs = piece->content->subtitle->x_scale();
883 float const ys = piece->content->subtitle->y_scale();
884 float size = s.size();
886 /* Adjust size to express the common part of the scaling;
887 e.g. if xs = ys = 0.5 we scale size by 2.
889 if (xs > 1e-5 && ys > 1e-5) {
890 size *= 1 / min (1 / xs, 1 / ys);
894 /* Then express aspect ratio changes */
895 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
896 s.set_aspect_adjust (xs / ys);
899 s.set_in (dcp::Time(from.seconds(), 1000));
900 ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
901 ps.add_fonts (piece->content->subtitle->fonts ());
904 _active_subtitles.add_from (wp, ps, from);
908 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
910 if (!_active_subtitles.have (wp)) {
914 shared_ptr<Piece> piece = wp.lock ();
919 DCPTime const dcp_to = content_time_to_dcp (piece, to);
921 pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
923 if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
924 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
929 Player::seek (DCPTime time, bool accurate)
931 if (!_have_valid_pieces) {
941 if (_audio_processor) {
942 _audio_processor->flush ();
945 _audio_merger.clear ();
946 _active_subtitles.clear ();
948 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
949 if (time < i->content->position()) {
950 /* Before; seek to 0 */
951 i->decoder->seek (ContentTime(), accurate);
953 } else if (i->content->position() <= time && time < i->content->end()) {
954 /* During; seek to position */
955 i->decoder->seek (dcp_to_content_time (i, time), accurate);
958 /* After; this piece is done */
964 _last_video_time = time;
965 _last_video_eyes = EYES_LEFT;
966 _last_audio_time = time;
968 _last_video_time = optional<DCPTime>();
969 _last_video_eyes = optional<Eyes>();
970 _last_audio_time = optional<DCPTime>();
973 _black.set_position (time);
974 _silent.set_position (time);
976 _last_video.clear ();
980 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
982 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
983 player before the video that requires them.
985 _delay.push_back (make_pair (pv, time));
987 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
988 _last_video_time = time + one_video_frame();
989 _active_subtitles.clear_before (time);
991 _last_video_eyes = increment_eyes (pv->eyes());
993 if (_delay.size() < 3) {
997 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
999 do_emit_video (to_do.first, to_do.second);
1003 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1005 optional<PositionImage> subtitles = subtitles_for_frame (time);
1007 pv->set_subtitle (subtitles.get ());
1014 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1016 /* Log if the assert below is about to fail */
1017 if (_last_audio_time && time != *_last_audio_time) {
1018 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1021 /* This audio must follow on from the previous */
1022 DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1024 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1028 Player::fill_audio (DCPTimePeriod period)
1030 if (period.from == period.to) {
1034 DCPOMATIC_ASSERT (period.from < period.to);
1036 DCPTime t = period.from;
1037 while (t < period.to) {
1038 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1039 Frame const samples = block.frames_round(_film->audio_frame_rate());
1041 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1042 silence->make_silent ();
1043 emit_audio (silence, t);
1050 Player::one_video_frame () const
1052 return DCPTime::from_frames (1, _film->video_frame_rate ());
1055 pair<shared_ptr<AudioBuffers>, DCPTime>
1056 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1058 DCPTime const discard_time = discard_to - time;
1059 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1060 Frame remaining_frames = audio->frames() - discard_frames;
1061 if (remaining_frames <= 0) {
1062 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1064 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1065 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1066 return make_pair(cut, time + discard_time);
1070 Player::set_dcp_decode_reduction (optional<int> reduction)
1072 if (reduction == _dcp_decode_reduction) {
1076 _dcp_decode_reduction = reduction;
1077 _have_valid_pieces = false;