2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_subtitles.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "compose.hpp"
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <boost/foreach.hpp>
/* Convenience macro: compose a message with String::compose and write it
   to the film's log as a general-type entry. */
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
/* Construct a Player over a film/playlist pair.  Subscribes to change
   signals from the film and playlist, sizes the video container from the
   film's frame size, primes the audio-processor state and seeks to the
   start of the timeline.
   NOTE(review): this extraction is missing some lines here (e.g. the
   `_film (film)' initialiser and the opening/closing braces) -- confirm
   against the original file. */
81 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
83 , _playlist (playlist)
84 , _have_valid_pieces (false)
85 , _ignore_video (false)
86 , _ignore_subtitle (false)
87 , _always_burn_subtitles (false)
89 , _play_referenced (false)
90 , _audio_merger (_film->audio_frame_rate())
/* Re-run our setup when the film, playlist or any content changes. */
93 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
94 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
95 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
96 set_video_container_size (_film->frame_size ());
/* Pick up any audio processor the film is already configured with. */
98 film_changed (Film::AUDIO_PROCESSOR);
/* Start at the beginning with an accurate seek. */
100 seek (DCPTime (), true);
/* (Re)build the list of Pieces (content + decoder + frame-rate change)
   from the playlist, wire decoder output signals back into this Player,
   and reset playback bookkeeping (stream states, black/silence fillers,
   last-emitted times).  NOTE(review): several control-flow lines
   (braces, `continue's, null checks) are missing from this extraction. */
109 Player::setup_pieces ()
/* A Shuffler re-orders 3D left/right-eye frames into presentation order. */
114 _shuffler = new Shuffler();
115 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
117 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files are missing. */
119 if (!i->paths_valid ()) {
123 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
124 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
127 /* Not something that we can decode; e.g. Atmos content */
/* Honour the ignore flags set via set_ignore_video / set_ignore_subtitle. */
131 if (decoder->video && _ignore_video) {
132 decoder->video->set_ignore (true);
135 if (decoder->subtitle && _ignore_subtitle) {
136 decoder->subtitle->set_ignore (true);
/* DCP content needs extra configuration for referenced-reel playback. */
139 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
141 dcp->set_decode_referenced (_play_referenced);
142 if (_play_referenced) {
143 dcp->set_forced_reduction (_dcp_decode_reduction);
147 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
148 _pieces.push_back (piece);
150 if (decoder->video) {
/* 3D left/right-eye content goes through the Shuffler so eyes arrive in order;
   everything else connects straight to Player::video. */
151 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
152 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
154 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
158 if (decoder->audio) {
159 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
162 if (decoder->subtitle) {
163 decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
164 decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
165 decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
/* Track per-audio-stream push positions so pass() knows how far audio is complete. */
169 _stream_states.clear ();
170 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
171 if (i->content->audio) {
172 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
173 _stream_states[j] = StreamState (i, i->content->position ());
/* Gaps in the timeline are filled with black video and silent audio. */
178 _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
179 _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
/* Reset emission bookkeeping to the start of the DCP. */
181 _last_video_time = DCPTime ();
182 _last_video_eyes = EYES_BOTH;
183 _last_audio_time = DCPTime ();
184 _have_valid_pieces = true;
/* Respond to a change in a property of some content in the playlist.
   The first property group requires the pieces to be rebuilt; the second
   group lists properties whose handling follows in lines elided from
   this extraction (presumably a lighter-weight notification) -- confirm
   against the original file. */
188 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
190 shared_ptr<Content> c = w.lock ();
/* Properties that invalidate the current Piece list. */
196 property == ContentProperty::POSITION ||
197 property == ContentProperty::LENGTH ||
198 property == ContentProperty::TRIM_START ||
199 property == ContentProperty::TRIM_END ||
200 property == ContentProperty::PATH ||
201 property == VideoContentProperty::FRAME_TYPE ||
202 property == DCPContentProperty::NEEDS_ASSETS ||
203 property == DCPContentProperty::NEEDS_KDM ||
204 property == SubtitleContentProperty::COLOUR ||
205 property == SubtitleContentProperty::EFFECT ||
206 property == SubtitleContentProperty::EFFECT_COLOUR ||
207 property == FFmpegContentProperty::SUBTITLE_STREAM ||
208 property == FFmpegContentProperty::FILTERS ||
209 property == VideoContentProperty::COLOUR_CONVERSION
212 _have_valid_pieces = false;
/* Properties that affect output but do not require new pieces. */
216 property == SubtitleContentProperty::LINE_SPACING ||
217 property == SubtitleContentProperty::OUTLINE_WIDTH ||
218 property == SubtitleContentProperty::Y_SCALE ||
219 property == SubtitleContentProperty::FADE_IN ||
220 property == SubtitleContentProperty::FADE_OUT ||
221 property == ContentProperty::VIDEO_FRAME_RATE ||
222 property == SubtitleContentProperty::USE ||
223 property == SubtitleContentProperty::X_OFFSET ||
224 property == SubtitleContentProperty::Y_OFFSET ||
225 property == SubtitleContentProperty::X_SCALE ||
226 property == SubtitleContentProperty::FONTS ||
227 property == VideoContentProperty::CROP ||
228 property == VideoContentProperty::SCALE ||
229 property == VideoContentProperty::FADE_IN ||
230 property == VideoContentProperty::FADE_OUT
/* Set the size of the container into which video will be composited and
   rebuild the cached black frame at that size.  No-op if the size is
   unchanged.  NOTE(review): the early-return body and closing braces are
   missing from this extraction. */
238 Player::set_video_container_size (dcp::Size s)
240 if (s == _video_container_size) {
244 _video_container_size = s;
/* Re-create the black frame used to fill timeline gaps. */
246 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
247 _black_image->make_black ();
/* The playlist's content list changed: invalidate the pieces so they are
   rebuilt on the next pass/seek.  NOTE(review): lines after this (e.g. a
   possible change-signal emission) are missing from this extraction. */
253 Player::playlist_changed ()
255 _have_valid_pieces = false;
/* React to a change in a Film property that affects our output. */
260 Player::film_changed (Film::Property p)
262 /* Here we should notice Film properties that affect our output, and
263 alert listeners that our output now would be different to how it was
264 last time we were run.
267 if (p == Film::CONTAINER) {
269 } else if (p == Film::VIDEO_FRAME_RATE) {
270 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
271 so we need new pieces here.
273 _have_valid_pieces = false;
275 } else if (p == Film::AUDIO_PROCESSOR) {
/* (Re)clone the film's audio processor at the film's audio rate. */
276 if (_film->audio_processor ()) {
277 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
/* Convert a list of image subtitles into PositionImages scaled and
   positioned for the current _video_container_size.  NOTE(review): the
   scale/convert call and its surrounding lines are partially missing
   from this extraction. */
283 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
285 list<PositionImage> all;
287 for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
292 /* We will scale the subtitle up to fit _video_container_size */
293 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
295 /* Then we need a corrective translation, consisting of two parts:
297 * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
298 * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
300 * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
301 * (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
302 * (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
304 * Combining these two translations gives these expressions.
311 dcp::YUV_TO_RGB_REC601,
312 i->image->pixel_format (),
/* Position in the container derived from the subtitle rectangle. */
317 lrint (_video_container_size.width * i->rectangle.x),
318 lrint (_video_container_size.height * i->rectangle.y)
/* Build a PlayerVideo wrapping the cached black image, sized to the
   video container, for the given eyes.  Used to fill timeline gaps.
   NOTE(review): several constructor arguments are missing from this
   extraction. */
327 shared_ptr<PlayerVideo>
328 Player::black_player_video_frame (Eyes eyes) const
330 return shared_ptr<PlayerVideo> (
332 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
335 _video_container_size,
336 _video_container_size,
/* Any colour conversion will do for an all-black frame. */
339 PresetColourConversion::all().front().conversion
345 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
347 DCPTime s = t - piece->content->position ();
348 s = min (piece->content->length_after_trim(), s);
349 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
351 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
352 then convert that ContentTime to frames at the content's rate. However this fails for
353 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
354 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
356 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
358 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
362 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
364 /* See comment in dcp_to_content_video */
365 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
366 return d + piece->content->position();
370 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
372 DCPTime s = t - piece->content->position ();
373 s = min (piece->content->length_after_trim(), s);
374 /* See notes in dcp_to_content_video */
375 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
379 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
381 /* See comment in dcp_to_content_video */
382 return DCPTime::from_frames (f, _film->audio_frame_rate())
383 - DCPTime (piece->content->trim_start(), piece->frc)
384 + piece->content->position();
388 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
390 DCPTime s = t - piece->content->position ();
391 s = min (piece->content->length_after_trim(), s);
392 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
396 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
398 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by all subtitle content in the playlist,
   rebuilding the pieces first if required.  NOTE(review): the
   setup_pieces() call inside the guard and the return statement are
   missing from this extraction. */
401 list<shared_ptr<Font> >
402 Player::get_subtitle_fonts ()
404 if (!_have_valid_pieces) {
408 list<shared_ptr<Font> > fonts;
409 BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
410 if (p->content->subtitle) {
411 /* XXX: things may go wrong if there are duplicate font IDs
412 with different font files.
414 list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
415 copy (f.begin(), f.end(), back_inserter (fonts));
422 /** Set this player never to produce any video data */
424 Player::set_ignore_video ()
426 _ignore_video = true;
430 Player::set_ignore_subtitle ()
432 _ignore_subtitle = true;
435 /** Set whether or not this player should always burn text subtitles into the image,
436 * regardless of the content settings.
437 * @param burn true to always burn subtitles, false to obey content settings.
440 Player::set_always_burn_subtitles (bool burn)
442 _always_burn_subtitles = burn;
445 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function signature and the statement that sets the
   fast flag (presumably `_fast = true;') are missing from this
   extraction; only the piece invalidation survives.  Confirm against the
   original file. */
450 _have_valid_pieces = false;
454 Player::set_play_referenced ()
456 _play_referenced = true;
457 _have_valid_pieces = false;
/* Gather the reel assets (picture/sound/subtitle) from any DCP content
   which is marked to be referenced rather than re-encoded, trimming each
   asset's entry point and duration to match the content's trims and
   positioning it on the film timeline.  NOTE(review): several lines
   (guards, `a.push_back' calls, the offset declaration, the return) are
   missing from this extraction. */
460 list<ReferencedReelAsset>
461 Player::get_reel_assets ()
463 list<ReferencedReelAsset> a;
465 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can be referenced. */
466 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
471 scoped_ptr<DCPDecoder> decoder;
473 decoder.reset (new DCPDecoder (j, _film->log(), false));
479 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
/* Express the content's trims as frame counts at the content's rate. */
481 DCPOMATIC_ASSERT (j->video_frame_rate ());
482 double const cfr = j->video_frame_rate().get();
483 Frame const trim_start = j->trim_start().frames_round (cfr);
484 Frame const trim_end = j->trim_end().frames_round (cfr);
485 int const ffr = _film->video_frame_rate ();
/* Where this reel starts on the film timeline. */
487 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
488 if (j->reference_video ()) {
489 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
490 DCPOMATIC_ASSERT (ra);
/* Apply the trims to the asset's entry point and duration. */
491 ra->set_entry_point (ra->entry_point() + trim_start);
492 ra->set_duration (ra->duration() - trim_start - trim_end);
494 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
498 if (j->reference_audio ()) {
499 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
500 DCPOMATIC_ASSERT (ra);
501 ra->set_entry_point (ra->entry_point() + trim_start);
502 ra->set_duration (ra->duration() - trim_start - trim_end);
504 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
508 if (j->reference_subtitle ()) {
509 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
510 DCPOMATIC_ASSERT (ra);
511 ra->set_entry_point (ra->entry_point() + trim_start);
512 ra->set_duration (ra->duration() - trim_start - trim_end);
514 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
518 /* Assume that main picture duration is the length of the reel */
519 offset += k->main_picture()->duration ();
/* Player::pass: advance playback by asking the furthest-behind decoder
   (or the black/silence filler) to emit some data, then flush any audio
   that is known to be complete.  NOTE(review): the function signature
   and a number of interior lines (the switch over what is earliest, the
   return statements, closing braces) are missing from this extraction. */
529 if (!_have_valid_pieces) {
533 if (_playlist->length() == DCPTime()) {
534 /* Special case of an empty Film; just give one black frame */
535 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
539 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
541 shared_ptr<Piece> earliest_content;
542 optional<DCPTime> earliest_time;
544 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Where this piece's decoder currently is, on the DCP timeline. */
549 DCPTime const t = content_time_to_dcp (i, i->decoder->position());
550 if (t > i->content->end()) {
554 /* Given two choices at the same time, pick the one with a subtitle so we see it before
557 if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->subtitle)) {
559 earliest_content = i;
573 if (earliest_content) {
/* Black/silence fillers may be behind all the content decoders. */
577 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
578 earliest_time = _black.position ();
582 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
583 earliest_time = _silent.position ();
/* CONTENT case: let the earliest decoder produce some data. */
589 earliest_content->done = earliest_content->decoder->pass ();
/* BLACK case: emit one black frame and advance the filler. */
592 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
593 _black.set_position (_black.position() + one_video_frame());
/* SILENT case: emit up to one frame's worth of silence. */
597 DCPTimePeriod period (_silent.period_at_position());
598 if (_last_audio_time) {
599 /* Sometimes the thing that happened last finishes fractionally before
600 this silence. Bodge the start time of the silence to fix it. I'm
601 not sure if this is the right solution --- maybe the last thing should
602 be padded `forward' rather than this thing padding `back'.
604 period.from = min(period.from, *_last_audio_time);
606 if (period.duration() > one_video_frame()) {
607 period.to = period.from + one_video_frame();
610 _silent.set_position (period.to);
618 /* Emit any audio that is ready */
620 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
621 of our streams, or the position of the _silent.
623 DCPTime pull_to = _film->length ();
624 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
625 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
626 pull_to = i->second.last_push_end;
629 if (!_silent.done() && _silent.position() < pull_to) {
630 pull_to = _silent.position();
633 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
634 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
635 if (_last_audio_time && i->second < *_last_audio_time) {
636 /* This new data comes before the last we emitted (or the last seek); discard it */
637 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
642 } else if (_last_audio_time && i->second > *_last_audio_time) {
643 /* There's a gap between this data and the last we emitted; fill with silence */
644 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
647 emit_audio (i->first, i->second);
/* Gather all subtitles (image and rendered text) that should be burnt
   into the video frame at the given time, merged into a single
   PositionImage; boost::none if there are none. */
656 optional<PositionImage>
657 Player::subtitles_for_frame (DCPTime time) const
659 list<PositionImage> subtitles;
661 int const vfr = _film->video_frame_rate();
/* Active subtitles overlapping this one-frame period which should be burnt in. */
663 BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_subtitles)) {
665 /* Image subtitles */
666 list<PositionImage> c = transform_image_subtitles (i.image);
667 copy (c.begin(), c.end(), back_inserter (subtitles));
669 /* Text subtitles (rendered to an image) */
670 if (!i.text.empty ()) {
671 list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time, vfr);
672 copy (s.begin(), s.end(), back_inserter (subtitles));
676 if (subtitles.empty ()) {
677 return optional<PositionImage> ();
680 return merge (subtitles);
/* Handle a video frame emitted by a piece's decoder: discard frames
   outside the valid range, fill any gap since the last emitted frame
   (re-using the previous frame or black, handling 3D eye ordering),
   build a PlayerVideo for the new frame and emit it (repeated as the
   FrameRateChange requires).  NOTE(review): early returns, braces, the
   PlayerVideo constructor arguments and the initial value of `t' in the
   final loop are missing from this extraction. */
684 Player::video (weak_ptr<Piece> wp, ContentVideo video)
686 shared_ptr<Piece> piece = wp.lock ();
/* When skipping (content rate > DCP rate) drop every other frame. */
691 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
692 if (frc.skip && (video.frame % 2) == 1) {
696 /* Time of the first frame we will emit */
697 DCPTime const time = content_video_to_dcp (piece, video.frame);
699 /* Discard if it's before the content's period or the last accurate seek. We can't discard
700 if it's after the content's period here as in that case we still need to fill any gap between
701 `now' and the end of the content's period.
703 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
707 /* Fill gaps that we discover now that we have some video which needs to be emitted.
708 This is where we need to fill to.
710 DCPTime fill_to = min (time, piece->content->end());
712 if (_last_video_time) {
713 DCPTime fill_from = max (*_last_video_time, piece->content->position());
714 LastVideoMap::const_iterator last = _last_video.find (wp);
715 if (_film->three_d()) {
/* 3D: fill eye-by-eye, starting from the next expected eye. */
716 DCPTime j = fill_from;
717 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
718 if (eyes == EYES_BOTH) {
721 while (j < fill_to || eyes != video.eyes) {
722 if (last != _last_video.end()) {
/* Repeat the previous frame for this piece, re-stamped with the right eye. */
723 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
724 copy->set_eyes (eyes);
725 emit_video (copy, j);
727 emit_video (black_player_video_frame(eyes), j);
/* Only advance time once both eyes have been emitted. */
729 if (eyes == EYES_RIGHT) {
730 j += one_video_frame();
732 eyes = increment_eyes (eyes);
/* 2D: fill frame-by-frame with the previous frame or black. */
735 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
736 if (last != _last_video.end()) {
737 emit_video (last->second, j);
739 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Build the PlayerVideo for this frame, remembering it per-piece for gap filling. */
745 _last_video[wp].reset (
748 piece->content->video->crop (),
749 piece->content->video->fade (video.frame),
750 piece->content->video->scale().size (
751 piece->content->video, _video_container_size, _film->frame_size ()
753 _video_container_size,
756 piece->content->video->colour_conversion ()
/* Emit the frame, repeated if the FrameRateChange requires it. */
761 for (int i = 0; i < frc.repeat; ++i) {
762 if (t < piece->content->end()) {
763 emit_video (_last_video[wp], t);
765 t += one_video_frame ();
/* Handle an audio block emitted by a piece's decoder: place it on the
   DCP timeline, trim anything outside the content's period, apply gain,
   remap to the film's channel layout, run any audio processor and push
   the result into the merger.  NOTE(review): early returns, braces and
   some section-heading comments are missing from this extraction. */
770 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
772 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
774 shared_ptr<Piece> piece = wp.lock ();
779 shared_ptr<AudioContent> content = piece->content->audio;
780 DCPOMATIC_ASSERT (content);
782 /* Compute time in the DCP */
783 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
784 /* And the end of this block in the DCP */
785 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
787 /* Remove anything that comes before the start or after the end of the content */
788 if (time < piece->content->position()) {
789 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
791 /* This audio is entirely discarded */
794 content_audio.audio = cut.first;
796 } else if (time > piece->content->end()) {
/* Entirely after the content's period; drop it. */
799 } else if (end > piece->content->end()) {
/* Straddles the end of the content's period; keep only the in-period part. */
800 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
801 if (remaining_frames == 0) {
804 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
805 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
806 content_audio.audio = cut;
809 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting, if any. */
813 if (content->gain() != 0) {
814 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
815 gain->apply_gain (content->gain ());
816 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout. */
821 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
825 if (_audio_processor) {
826 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push into the merger and record how far this stream is complete. */
831 _audio_merger.push (content_audio.audio, time);
832 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
833 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handle the start of an image subtitle: apply the content's offset and
   scale settings to its rectangle, then register it as active from the
   corresponding DCP time.  NOTE(review): the declaration of `ps'
   (presumably a PlayerSubtitles) is missing from this extraction. */
837 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
839 shared_ptr<Piece> piece = wp.lock ();
844 /* Apply content's subtitle offsets */
845 subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
846 subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
848 /* Apply content's subtitle scale */
849 subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
850 subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
852 /* Apply a corrective translation to keep the subtitle centred after that scale */
853 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
854 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
857 ps.image.push_back (subtitle.sub);
858 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
860 _active_subtitles.add_from (wp, ps, from);
/* Handle the start of a text subtitle: apply the content's offsets and
   express its X/Y scaling as a size change plus an aspect adjustment,
   then register it as active from the corresponding DCP time.
   NOTE(review): the declaration of `ps' and some braces are missing
   from this extraction. */
864 Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
866 shared_ptr<Piece> piece = wp.lock ();
872 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
874 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
875 s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
876 s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
877 float const xs = piece->content->subtitle->x_scale();
878 float const ys = piece->content->subtitle->y_scale();
879 float size = s.size();
881 /* Adjust size to express the common part of the scaling;
882 e.g. if xs = ys = 0.5 we scale size by 2.
884 if (xs > 1e-5 && ys > 1e-5) {
885 size *= 1 / min (1 / xs, 1 / ys);
889 /* Then express aspect ratio changes */
890 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
891 s.set_aspect_adjust (xs / ys);
/* Stamp the subtitle's in-time (editable-to-seconds at 1000 tcr). */
894 s.set_in (dcp::Time(from.seconds(), 1000));
895 ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
896 ps.add_fonts (piece->content->subtitle->fonts ());
899 _active_subtitles.add_from (wp, ps, from);
/* Handle the end of a subtitle: close off the active subtitle at the
   corresponding DCP time and, if it is not to be burnt in, emit it via
   the Subtitle signal. */
903 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
905 if (!_active_subtitles.have (wp)) {
909 shared_ptr<Piece> piece = wp.lock ();
914 DCPTime const dcp_to = content_time_to_dcp (piece, to);
916 pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
/* Only emit separately if it is in use and not being burnt into the image. */
918 if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
919 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
924 Player::seek (DCPTime time, bool accurate)
926 if (!_have_valid_pieces) {
934 if (_audio_processor) {
935 _audio_processor->flush ();
938 _audio_merger.clear ();
939 _active_subtitles.clear ();
941 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
942 if (time < i->content->position()) {
943 /* Before; seek to 0 */
944 i->decoder->seek (ContentTime(), accurate);
946 } else if (i->content->position() <= time && time < i->content->end()) {
947 /* During; seek to position */
948 i->decoder->seek (dcp_to_content_time (i, time), accurate);
951 /* After; this piece is done */
957 _last_video_time = time;
958 _last_video_eyes = EYES_LEFT;
959 _last_audio_time = time;
961 _last_video_time = optional<DCPTime>();
962 _last_video_eyes = optional<Eyes>();
963 _last_audio_time = optional<DCPTime>();
966 _black.set_position (time);
967 _silent.set_position (time);
969 _last_video.clear ();
/* Emit a video frame: burn in any subtitles due at this time and update
   the last-video bookkeeping (time only advances after the right eye or
   a both-eyes frame).  NOTE(review): the Video signal emission and some
   braces are missing from this extraction. */
973 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
975 optional<PositionImage> subtitles = subtitles_for_frame (time);
977 pv->set_subtitle (subtitles.get ());
982 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
983 _last_video_time = time + one_video_frame();
/* Subtitles before this frame can never be needed again. */
984 _active_subtitles.clear_before (time);
986 _last_video_eyes = increment_eyes (pv->eyes());
/* Emit an audio block, asserting that it follows on exactly from the
   previous one, and advance _last_audio_time past it.  NOTE(review):
   the Audio signal emission is missing from this extraction. */
990 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
992 /* Log if the assert below is about to fail */
993 if (_last_audio_time && time != *_last_audio_time) {
994 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
997 /* This audio must follow on from the previous */
998 DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1000 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Fill a period of the timeline with silence, emitted in blocks of at
   most half a second.  NOTE(review): the early return, the loop
   increment (presumably `t += block;') and closing braces are missing
   from this extraction. */
1004 Player::fill_audio (DCPTimePeriod period)
1006 if (period.from == period.to) {
1010 DCPOMATIC_ASSERT (period.from < period.to);
1012 DCPTime t = period.from;
1013 while (t < period.to) {
1014 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1015 Frame const samples = block.frames_round(_film->audio_frame_rate());
1017 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1018 silence->make_silent ();
1019 emit_audio (silence, t);
1026 Player::one_video_frame () const
1028 return DCPTime::from_frames (1, _film->video_frame_rate ());
1031 pair<shared_ptr<AudioBuffers>, DCPTime>
1032 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1034 DCPTime const discard_time = discard_to - time;
1035 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1036 Frame remaining_frames = audio->frames() - discard_frames;
1037 if (remaining_frames <= 0) {
1038 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1040 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1041 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1042 return make_pair(cut, time + discard_time);
/* Set (or clear) a forced decode-resolution reduction for DCP content;
   no-op when unchanged, otherwise the pieces must be rebuilt.
   NOTE(review): lines after the last shown statement (e.g. the closing
   brace, a possible change-signal emission) may be missing from this
   extraction. */
1046 Player::set_dcp_decode_reduction (optional<int> reduction)
1048 if (reduction == _dcp_decode_reduction) {
1052 _dcp_decode_reduction = reduction;
1053 _have_valid_pieces = false;