2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_subtitles.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "compose.hpp"
54 #include <dcp/reel_sound_asset.h>
55 #include <dcp/reel_subtitle_asset.h>
56 #include <dcp/reel_picture_asset.h>
57 #include <boost/foreach.hpp>
/* Convenience macro: write a String::compose-formatted message to the film's log
   as a general (non-error) entry. */
64 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
76 using boost::shared_ptr;
77 using boost::weak_ptr;
78 using boost::dynamic_pointer_cast;
79 using boost::optional;
80 using boost::scoped_ptr;
/* Construct a Player for a film/playlist pair.
   NOTE(review): this extract is missing some original lines (e.g. the first
   member initialiser, presumably `_film (film)` — confirm against the full file);
   comments describe only what is visible here. */
82 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
84 	, _playlist (playlist)
85 	, _have_valid_pieces (false)
86 	, _ignore_video (false)
87 	, _ignore_subtitle (false)
88 	, _always_burn_subtitles (false)
90 	, _play_referenced (false)
91 	, _audio_merger (_film->audio_frame_rate())
/* Watch the film and playlist for changes so we can invalidate our state. */
95 	_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
96 	_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
97 	_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
98 	set_video_container_size (_film->frame_size ());
/* Run the AUDIO_PROCESSOR handler once so _audio_processor reflects the film's
   current state from the start. */
100 	film_changed (Film::AUDIO_PROCESSOR);
/* Accurate seek to time zero to put everything into a defined initial state. */
102 	seek (DCPTime (), true);
/* (Re)build the list of Pieces (content + decoder + frame-rate-change) from the
   playlist, wire decoder outputs to our handlers, and reset playback state.
   Called whenever _have_valid_pieces is false. */
112 Player::setup_pieces ()
/* Shuffler re-orders 3D left/right-eye frames that may arrive out of sequence. */
117 	_shuffler = new Shuffler();
118 	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));
/* Delay buffers video slightly so subtitles can arrive first. */
121 	_delay = new Delay();
122 	_delay->Video.connect(bind(&Player::video, this, _1, _2));
124 	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files are missing on disk. */
126 		if (!i->paths_valid ()) {
130 		shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
131 		FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
134 			/* Not something that we can decode; e.g. Atmos content */
/* Honour the ignore flags set by set_ignore_video() / set_ignore_subtitle(). */
138 		if (decoder->video && _ignore_video) {
139 			decoder->video->set_ignore (true);
142 		if (decoder->subtitle && _ignore_subtitle) {
143 			decoder->subtitle->set_ignore (true);
/* DCP content needs extra configuration for playing referenced (VF) assets. */
146 		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
148 			dcp->set_decode_referenced (_play_referenced);
149 			if (_play_referenced) {
150 				dcp->set_forced_reduction (_dcp_decode_reduction);
154 		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
155 		_pieces.push_back (piece);
157 		if (decoder->video) {
158 			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
159 				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
160 				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
162 				/* We need a Delay to give a little wiggle room to ensure that relevant subtitles arrive at the
163 				   player before the video that requires them.
165 				decoder->video->Data.connect (bind (&Delay::video, _delay, weak_ptr<Piece>(piece), _1));
169 		if (decoder->audio) {
170 			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
173 		if (decoder->subtitle) {
174 			decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
175 			decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
176 			decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
/* Record, per audio stream, the piece it belongs to and where its audio starts,
   so pass() can work out how far each stream has been pushed. */
180 	_stream_states.clear ();
181 	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
182 		if (i->content->audio) {
183 			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
184 				_stream_states[j] = StreamState (i, i->content->position ());
/* Empty trackers for the gaps in the timeline with no video (black) or no audio (silence). */
189 	_black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
190 	_silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
192 	_last_video_time = DCPTime ();
193 	_last_video_eyes = EYES_BOTH;
194 	_last_audio_time = DCPTime ();
195 	_have_valid_pieces = true;
/* Handle a change to a property of some content in the playlist.
   The first group of properties affects how pieces are built, so they
   invalidate _have_valid_pieces; the second group only affects rendered
   output (presumably triggering a change notification to listeners — the
   relevant lines are not visible in this extract; confirm). */
199 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
201 	shared_ptr<Content> c = w.lock ();
/* Properties that change piece structure/timing/decoding: rebuild pieces. */
207 		property == ContentProperty::POSITION ||
208 		property == ContentProperty::LENGTH ||
209 		property == ContentProperty::TRIM_START ||
210 		property == ContentProperty::TRIM_END ||
211 		property == ContentProperty::PATH ||
212 		property == VideoContentProperty::FRAME_TYPE ||
213 		property == DCPContentProperty::NEEDS_ASSETS ||
214 		property == DCPContentProperty::NEEDS_KDM ||
215 		property == SubtitleContentProperty::COLOUR ||
216 		property == SubtitleContentProperty::EFFECT ||
217 		property == SubtitleContentProperty::EFFECT_COLOUR ||
218 		property == FFmpegContentProperty::SUBTITLE_STREAM ||
219 		property == FFmpegContentProperty::FILTERS ||
220 		property == VideoContentProperty::COLOUR_CONVERSION
223 		_have_valid_pieces = false;
/* Properties that only change how existing pieces are rendered. */
227 		property == SubtitleContentProperty::LINE_SPACING ||
228 		property == SubtitleContentProperty::OUTLINE_WIDTH ||
229 		property == SubtitleContentProperty::Y_SCALE ||
230 		property == SubtitleContentProperty::FADE_IN ||
231 		property == SubtitleContentProperty::FADE_OUT ||
232 		property == ContentProperty::VIDEO_FRAME_RATE ||
233 		property == SubtitleContentProperty::USE ||
234 		property == SubtitleContentProperty::X_OFFSET ||
235 		property == SubtitleContentProperty::Y_OFFSET ||
236 		property == SubtitleContentProperty::X_SCALE ||
237 		property == SubtitleContentProperty::FONTS ||
238 		property == VideoContentProperty::CROP ||
239 		property == VideoContentProperty::SCALE ||
240 		property == VideoContentProperty::FADE_IN ||
241 		property == VideoContentProperty::FADE_OUT
/* Set the size of the video "container" (the DCP frame size into which video
   is placed).  Recreates the cached all-black frame at the new size. */
249 Player::set_video_container_size (dcp::Size s)
/* No-op if the size is unchanged. */
251 	if (s == _video_container_size) {
255 	_video_container_size = s;
257 	_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
258 	_black_image->make_black ();
/* The playlist's content list changed: our pieces no longer match it. */
264 Player::playlist_changed ()
266 	_have_valid_pieces = false;
/* Handle a change to a Film property that may affect our output. */
271 Player::film_changed (Film::Property p)
273 	/* Here we should notice Film properties that affect our output, and
274 	   alert listeners that our output now would be different to how it was
275 	   last time we were run.
278 	if (p == Film::CONTAINER) {
280 	} else if (p == Film::VIDEO_FRAME_RATE) {
281 		/* Pieces contain a FrameRateChange which contains the DCP frame rate,
282 		   so we need new pieces here.
284 		_have_valid_pieces = false;
/* Clone the film's audio processor at the film's audio rate (if one is set). */
286 	} else if (p == Film::AUDIO_PROCESSOR) {
287 		if (_film->audio_processor ()) {
288 			_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
/* Scale and position image subtitles (whose rectangles are expressed as
   proportions) into pixel coordinates within _video_container_size, returning
   them as PositionImages ready for merging/burning. */
294 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
296 	list<PositionImage> all;
298 	for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
303 		/* We will scale the subtitle up to fit _video_container_size */
304 		dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
306 		/* Then we need a corrective translation, consisting of two parts:
308 		 * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
309 		 *    rect.x * _video_container_size.width and rect.y * _video_container_size.height.
311 		 * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
312 		 *    (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
313 		 *    (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
315 		 * Combining these two translations gives these expressions.
322 				dcp::YUV_TO_RGB_REC601,
323 				i->image->pixel_format (),
/* lrint: rectangle coordinates are proportions; round to whole pixels. */
328 					lrint (_video_container_size.width * i->rectangle.x),
329 					lrint (_video_container_size.height * i->rectangle.y)
/* Make a PlayerVideo consisting of a completely black frame (from the cached
   _black_image) at the current container size, for the given eyes. */
338 shared_ptr<PlayerVideo>
339 Player::black_player_video_frame (Eyes eyes) const
341 	return shared_ptr<PlayerVideo> (
343 			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
346 			_video_container_size,
347 			_video_container_size,
/* Any colour conversion will do for an all-black frame; use the first preset. */
350 			PresetColourConversion::all().front().conversion
/* Convert a DCP time to a video frame index within a piece's content,
   accounting for the piece's position, trims and frame-rate change. */
356 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
358 	DCPTime s = t - piece->content->position ();
/* Clamp into [0, length_after_trim] and add back the start trim. */
359 	s = min (piece->content->length_after_trim(), s);
360 	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
362 	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
363 	   then convert that ContentTime to frames at the content's rate.  However this fails for
364 	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
365 	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
367 	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
369 	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: convert a content video frame index to a
   DCP time, undoing the trim and adding the piece's position. */
373 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
375 	/* See comment in dcp_to_content_video */
376 	DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
377 	return d + piece->content->position();
/* Convert a DCP time to an audio frame count at the film's audio rate,
   relative to the start of the piece's (trimmed) content. */
381 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
383 	DCPTime s = t - piece->content->position ();
384 	s = min (piece->content->length_after_trim(), s);
385 	/* See notes in dcp_to_content_video */
386 	return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: convert an audio frame index (at the
   film's audio rate) within a piece back to a DCP time. */
390 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
392 	/* See comment in dcp_to_content_video */
393 	return DCPTime::from_frames (f, _film->audio_frame_rate())
394 		- DCPTime (piece->content->trim_start(), piece->frc)
395 		+ piece->content->position();
/* Convert a DCP time to a ContentTime within a piece, clamped to the piece's
   trimmed extent. */
399 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
401 	DCPTime s = t - piece->content->position ();
402 	s = min (piece->content->length_after_trim(), s);
403 	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Convert a ContentTime within a piece to a DCP time, never before time 0. */
407 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
409 	return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by all subtitle content in the playlist.
   Rebuilds pieces first if they are stale (the call is elided in this extract). */
412 list<shared_ptr<Font> >
413 Player::get_subtitle_fonts ()
415 	if (!_have_valid_pieces) {
419 	list<shared_ptr<Font> > fonts;
420 	BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
421 		if (p->content->subtitle) {
422 			/* XXX: things may go wrong if there are duplicate font IDs
423 			   with different font files.
425 			list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
426 			copy (f.begin(), f.end(), back_inserter (fonts));
433 /** Set this player never to produce any video data */
435 Player::set_ignore_video ()
437 	_ignore_video = true;
/* Set this player never to produce any subtitle data. */
441 Player::set_ignore_subtitle ()
443 	_ignore_subtitle = true;
446 /** Set whether or not this player should always burn text subtitles into the image,
447  *  regardless of the content settings.
448  *  @param burn true to always burn subtitles, false to obey content settings.
451 Player::set_always_burn_subtitles (bool burn)
453 	_always_burn_subtitles = burn;
456 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function signature (presumably Player::set_fast(), which
   would also set _fast) is missing from this extract — confirm against the full
   file.  Only the piece invalidation is visible here. */
461 	_have_valid_pieces = false;
/* Make this player decode and play assets that DCP content merely references
   (e.g. in a VF); requires pieces to be rebuilt. */
465 Player::set_play_referenced ()
467 	_play_referenced = true;
468 	_have_valid_pieces = false;
/* Build the list of reel assets (picture/sound/subtitle) that DCP content in
   the playlist asks to be referenced rather than re-encoded, each with the
   DCP-time period it should occupy in the output. */
471 list<ReferencedReelAsset>
472 Player::get_reel_assets ()
474 	list<ReferencedReelAsset> a;
476 	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can reference reel assets. */
477 		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
482 		scoped_ptr<DCPDecoder> decoder;
484 			decoder.reset (new DCPDecoder (j, _film->log(), false));
490 		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
/* Trims are expressed in frames at the content (DCP) rate... */
492 			DCPOMATIC_ASSERT (j->video_frame_rate ());
493 			double const cfr = j->video_frame_rate().get();
494 			Frame const trim_start = j->trim_start().frames_round (cfr);
495 			Frame const trim_end = j->trim_end().frames_round (cfr);
/* ...while positions in the output use the film's rate. */
496 			int const ffr = _film->video_frame_rate ();
498 			DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
/* For each referenced asset type, apply the trims to the asset's entry point
   and duration, then record the period it covers. */
499 			if (j->reference_video ()) {
500 				shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
501 				DCPOMATIC_ASSERT (ra);
502 				ra->set_entry_point (ra->entry_point() + trim_start);
503 				ra->set_duration (ra->duration() - trim_start - trim_end);
505 					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
509 			if (j->reference_audio ()) {
510 				shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
511 				DCPOMATIC_ASSERT (ra);
512 				ra->set_entry_point (ra->entry_point() + trim_start);
513 				ra->set_duration (ra->duration() - trim_start - trim_end);
515 					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
519 			if (j->reference_subtitle ()) {
520 				shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
521 				DCPOMATIC_ASSERT (ra);
522 				ra->set_entry_point (ra->entry_point() + trim_start);
523 				ra->set_duration (ra->duration() - trim_start - trim_end);
525 					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
529 			/* Assume that main picture duration is the length of the reel */
530 			offset += k->main_picture()->duration ();
/* NOTE(review): the enclosing function's signature is not visible in this
   extract; from the body this appears to be Player::pass(), which advances the
   player by making the most-behind decoder (or the black/silence fillers) emit
   some data — confirm against the full file. */
540 	if (!_have_valid_pieces) {
/* Empty film: emit a single black frame and stop. */
544 	if (_playlist->length() == DCPTime()) {
545 		/* Special case of an empty Film; just give one black frame */
546 		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
550 	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */
552 	shared_ptr<Piece> earliest_content;
553 	optional<DCPTime> earliest_time;
555 	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
560 		DCPTime const t = content_time_to_dcp (i, i->decoder->position());
/* A piece positioned past its own end has nothing more to contribute. */
561 		if (t > i->content->end()) {
565 		/* Given two choices at the same time, pick the one with a subtitle so we see it before
568 		if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->subtitle)) {
570 			earliest_content = i;
584 	if (earliest_content) {
/* Black/silence fillers can themselves be the most-behind "source". */
588 	if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
589 		earliest_time = _black.position ();
593 	if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
594 		earliest_time = _silent.position ();
/* Ask the chosen decoder for more data; pass() returning true marks it done. */
600 		earliest_content->done = earliest_content->decoder->pass ();
/* Fill a video gap with one black frame. */
603 		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
604 		_black.set_position (_black.position() + one_video_frame());
/* Fill an audio gap with up to one video frame's worth of silence. */
608 		DCPTimePeriod period (_silent.period_at_position());
609 		if (_last_audio_time) {
610 			/* Sometimes the thing that happened last finishes fractionally before
611 			   this silence.  Bodge the start time of the silence to fix it.  I'm
612 			   not sure if this is the right solution --- maybe the last thing should
613 			   be padded `forward' rather than this thing padding `back'.
615 			period.from = min(period.from, *_last_audio_time);
617 		if (period.duration() > one_video_frame()) {
618 			period.to = period.from + one_video_frame();
621 		_silent.set_position (period.to);
629 	/* Emit any audio that is ready */
631 	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
632 	   of our streams, or the position of the _silent.
634 	DCPTime pull_to = _film->length ();
635 	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
636 		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
637 			pull_to = i->second.last_push_end;
640 	if (!_silent.done() && _silent.position() < pull_to) {
641 		pull_to = _silent.position();
644 	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
645 	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
646 		if (_last_audio_time && i->second < *_last_audio_time) {
647 			/* This new data comes before the last we emitted (or the last seek); discard it */
648 			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
653 		} else if (_last_audio_time && i->second > *_last_audio_time) {
654 			/* There's a gap between this data and the last we emitted; fill with silence */
655 			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
658 		emit_audio (i->first, i->second);
/* Return the merged image of all subtitles that should be burnt into the video
   frame at `time', or an empty optional if there are none. */
668 optional<PositionImage>
669 Player::subtitles_for_frame (DCPTime time) const
671 	list<PositionImage> subtitles;
673 	int const vfr = _film->video_frame_rate();
/* Consider subtitles active during this one-frame period that should be burnt
   (either by content setting or the player-wide _always_burn_subtitles). */
675 	BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_subtitles)) {
677 		/* Image subtitles */
678 		list<PositionImage> c = transform_image_subtitles (i.image);
679 		copy (c.begin(), c.end(), back_inserter (subtitles));
681 		/* Text subtitles (rendered to an image) */
682 		if (!i.text.empty ()) {
683 			list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time, vfr);
684 			copy (s.begin(), s.end(), back_inserter (subtitles));
688 	if (subtitles.empty ()) {
689 		return optional<PositionImage> ();
692 	return merge (subtitles);
/* Handle a video frame arriving from a piece's decoder: convert its time to
   DCP time, discard it if it is out of range, fill any gap since the last
   emitted frame (with repeated frames or black), then emit it (repeated as
   required by the frame-rate change). */
696 Player::video (weak_ptr<Piece> wp, ContentVideo video)
698 	shared_ptr<Piece> piece = wp.lock ();
703 	FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
/* When skipping (content rate higher than DCP rate) drop every other frame. */
704 	if (frc.skip && (video.frame % 2) == 1) {
708 	/* Time of the first frame we will emit */
709 	DCPTime const time = content_video_to_dcp (piece, video.frame);
711 	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
712 	   if it's after the content's period here as in that case we still need to fill any gap between
713 	   `now' and the end of the content's period.
715 	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
719 	/* Fill gaps that we discover now that we have some video which needs to be emitted.
720 	   This is where we need to fill to.
722 	DCPTime fill_to = min (time, piece->content->end());
724 	if (_last_video_time) {
725 		DCPTime fill_from = max (*_last_video_time, piece->content->position());
726 		LastVideoMap::const_iterator last = _last_video.find (wp);
/* In 3D we must fill eye-by-eye, keeping left/right alternation intact. */
727 		if (_film->three_d()) {
728 			DCPTime j = fill_from;
729 			Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
730 			if (eyes == EYES_BOTH) {
733 			while (j < fill_to || eyes != video.eyes) {
/* Prefer repeating the last frame from this piece; otherwise use black. */
734 				if (last != _last_video.end()) {
735 					shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
736 					copy->set_eyes (eyes);
737 					emit_video (copy, j);
739 					emit_video (black_player_video_frame(eyes), j);
/* Time only advances after the right eye of each L/R pair. */
741 				if (eyes == EYES_RIGHT) {
742 					j += one_video_frame();
744 				eyes = increment_eyes (eyes);
/* 2D: fill one whole frame at a time. */
747 			for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
748 				if (last != _last_video.end()) {
749 					emit_video (last->second, j);
751 					emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this frame (crop/fade/scale/colour conversion)
   and remember it per-piece so gaps can be filled by repeating it. */
757 	_last_video[wp].reset (
760 			piece->content->video->crop (),
761 			piece->content->video->fade (video.frame),
762 			piece->content->video->scale().size (
763 				piece->content->video, _video_container_size, _film->frame_size ()
765 			_video_container_size,
768 			piece->content->video->colour_conversion ()
/* Emit the frame, repeated if the frame-rate change requires it, but never
   beyond the content's end. */
773 	for (int i = 0; i < frc.repeat; ++i) {
774 		if (t < piece->content->end()) {
775 			emit_video (_last_video[wp], t);
777 		t += one_video_frame ();
/* Handle audio arriving from a piece's decoder for one stream: trim it to the
   content's period, apply gain, remap to the film's channel layout, run the
   audio processor (if any), then push it into the merger. */
782 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
784 	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
786 	shared_ptr<Piece> piece = wp.lock ();
791 	shared_ptr<AudioContent> content = piece->content->audio;
792 	DCPOMATIC_ASSERT (content);
794 	/* Compute time in the DCP */
795 	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
796 	/* And the end of this block in the DCP */
797 	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
799 	/* Remove anything that comes before the start or after the end of the content */
800 	if (time < piece->content->position()) {
801 		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
803 			/* This audio is entirely discarded */
806 		content_audio.audio = cut.first;
808 	} else if (time > piece->content->end()) {
/* Block starts within the content but overruns its end: truncate it. */
811 	} else if (end > piece->content->end()) {
812 		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
813 		if (remaining_frames == 0) {
816 		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
817 		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
818 		content_audio.audio = cut;
821 	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting (dB), copying first so we don't modify
   the decoder's buffers. */
825 	if (content->gain() != 0) {
826 		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
827 		gain->apply_gain (content->gain ());
828 		content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout. */
833 	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
837 	if (_audio_processor) {
838 		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push into the merger and record how far this stream has been pushed, so
   pass() knows when audio before a given time is complete. */
843 	_audio_merger.push (content_audio.audio, time);
844 	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
845 	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handle the start of an image subtitle from a piece's decoder: apply the
   content's offset/scale settings and add it to the active-subtitles list. */
849 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
851 	shared_ptr<Piece> piece = wp.lock ();
856 	/* Apply content's subtitle offsets */
857 	subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
858 	subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
860 	/* Apply content's subtitle scale */
861 	subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
862 	subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
864 	/* Apply a corrective translation to keep the subtitle centred after that scale */
865 	subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
866 	subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
869 	ps.image.push_back (subtitle.sub);
870 	DCPTime from (content_time_to_dcp (piece, subtitle.from()));
872 	_active_subtitles.add_from (wp, ps, from);
/* Handle the start of a text subtitle: apply the content's offsets and scales
   to each SubtitleString, collect the fonts, and add to the active list. */
876 Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
878 	shared_ptr<Piece> piece = wp.lock ();
884 	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
886 	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
887 		s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
888 		s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
889 		float const xs = piece->content->subtitle->x_scale();
890 		float const ys = piece->content->subtitle->y_scale();
891 		float size = s.size();
893 		/* Adjust size to express the common part of the scaling;
894 		   e.g. if xs = ys = 0.5 we scale size by 2.
/* Guard against zero/near-zero scales before dividing. */
896 		if (xs > 1e-5 && ys > 1e-5) {
897 			size *= 1 / min (1 / xs, 1 / ys);
901 		/* Then express aspect ratio changes */
902 		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
903 			s.set_aspect_adjust (xs / ys);
/* dcp::Time here is (seconds, editable-rate); 1000 is the time-code rate used. */
906 		s.set_in (dcp::Time(from.seconds(), 1000));
907 		ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
908 		ps.add_fonts (piece->content->subtitle->fonts ());
911 	_active_subtitles.add_from (wp, ps, from);
/* Handle the end of a subtitle: close it in the active list and, if it is not
   being burnt in, emit it to listeners via the Subtitle signal. */
915 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
/* Ignore stops for subtitles we never saw start. */
917 	if (!_active_subtitles.have (wp)) {
921 	shared_ptr<Piece> piece = wp.lock ();
926 	DCPTime const dcp_to = content_time_to_dcp (piece, to);
928 	pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
/* Only emit as a separate subtitle if it is in use and not burnt in. */
930 	if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
931 		Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
/* Seek the player to `time'.  If `accurate' is true the next emitted data must
   be exactly at `time'; otherwise it may be earlier. */
936 Player::seek (DCPTime time, bool accurate)
938 	if (!_have_valid_pieces) {
/* Throw away any in-flight state from before the seek. */
950 	if (_audio_processor) {
951 		_audio_processor->flush ();
954 	_audio_merger.clear ();
955 	_active_subtitles.clear ();
957 	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
958 		if (time < i->content->position()) {
959 			/* Before; seek to 0 */
960 			i->decoder->seek (ContentTime(), accurate);
962 		} else if (i->content->position() <= time && time < i->content->end()) {
963 			/* During; seek to position */
964 			i->decoder->seek (dcp_to_content_time (i, time), accurate);
967 			/* After; this piece is done */
/* For an accurate seek we know exactly where the next data must be; for an
   inaccurate one we can't say, so clear the last-time trackers. */
973 		_last_video_time = time;
974 		_last_video_eyes = EYES_LEFT;
975 		_last_audio_time = time;
977 		_last_video_time = optional<DCPTime>();
978 		_last_video_eyes = optional<Eyes>();
979 		_last_audio_time = optional<DCPTime>();
982 	_black.set_position (time);
983 	_silent.set_position (time);
985 	_last_video.clear ();
/* Emit a video frame: burn in any subtitles for this time, then advance the
   last-video trackers (time only advances once per full L/R pair or 2D frame). */
989 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
991 	optional<PositionImage> subtitles = subtitles_for_frame (time);
993 		pv->set_subtitle (subtitles.get ());
998 	if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
999 		_last_video_time = time + one_video_frame();
/* Subtitles wholly before this time can no longer be needed. */
1000 		_active_subtitles.clear_before (time);
1002 	_last_video_eyes = increment_eyes (pv->eyes());
/* Emit a block of audio and advance _last_audio_time; audio must be emitted
   contiguously (asserted below). */
1006 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1008 	/* Log if the assert below is about to fail */
1009 	if (_last_audio_time && time != *_last_audio_time) {
1010 		_film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1013 	/* This audio must follow on from the previous */
1014 	DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1016 	_last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence covering `period', in blocks of at most half a second. */
1020 Player::fill_audio (DCPTimePeriod period)
/* Empty period: nothing to do. */
1022 	if (period.from == period.to) {
1026 	DCPOMATIC_ASSERT (period.from < period.to);
1028 	DCPTime t = period.from;
1029 	while (t < period.to) {
1030 		DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1031 		Frame const samples = block.frames_round(_film->audio_frame_rate());
1033 			shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1034 			silence->make_silent ();
1035 			emit_audio (silence, t);
/* The duration of one video frame at the film's video frame rate. */
1042 Player::one_video_frame () const
1044 	return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Discard the part of `audio' (starting at `time') that falls before
   `discard_to'.  Returns the remaining audio and its new start time; the
   buffer pointer is null if everything was discarded. */
1047 pair<shared_ptr<AudioBuffers>, DCPTime>
1048 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1050 	DCPTime const discard_time = discard_to - time;
1051 	Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1052 	Frame remaining_frames = audio->frames() - discard_frames;
1053 	if (remaining_frames <= 0) {
1054 		return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1056 	shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1057 	cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1058 	return make_pair(cut, time + discard_time);
/* Set the JPEG2000 decode resolution reduction to use for DCP content;
   pieces must be rebuilt for it to take effect. */
1062 Player::set_dcp_decode_reduction (optional<int> reduction)
/* No-op if unchanged. */
1064 	if (reduction == _dcp_decode_reduction) {
1068 	_dcp_decode_reduction = reduction;
1069 	_have_valid_pieces = false;