2 Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_subtitles.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <boost/foreach.hpp>
62 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
/* Construct a Player for a film/playlist pair.
 * NOTE(review): this extract has blank/brace lines stripped; the `_film (film)`
 * initialiser that normally precedes `_playlist` is not visible here.
 * Flags default to "do everything": nothing ignored, no forced burn-in,
 * referenced content not played locally. */
80 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
82 , _playlist (playlist)
83 , _have_valid_pieces (false)
84 , _ignore_video (false)
85 , _ignore_subtitle (false)
86 , _always_burn_subtitles (false)
88 , _play_referenced (false)
/* The merger collects per-stream audio and emits it at the film's sample rate. */
89 , _audio_merger (_film->audio_frame_rate())
/* Invalidate/rebuild state whenever the film, playlist or its content changes. */
91 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
92 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
93 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
94 set_video_container_size (_film->frame_size ());
/* Force creation of the audio processor (if the film wants one). */
96 film_changed (Film::AUDIO_PROCESSOR);
/* Start from the beginning with an accurate seek. */
98 seek (DCPTime (), true);
/* Rebuild the list of Pieces (content + decoder + frame-rate-change) from the
 * playlist, wiring decoder output signals back into this Player.  Called lazily
 * whenever _have_valid_pieces is false. */
102 Player::setup_pieces ()
106 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files have gone missing. */
108 if (!i->paths_valid ()) {
112 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
113 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
116 /* Not something that we can decode; e.g. Atmos content */
120 if (decoder->video && _ignore_video) {
121 decoder->video->set_ignore ();
124 if (decoder->subtitle && _ignore_subtitle) {
125 decoder->subtitle->set_ignore ();
128 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
/* NOTE(review): the inner `if (_play_referenced)` is redundant — the outer
 * condition already requires it.  Harmless, but should be collapsed. */
129 if (dcp && _play_referenced) {
130 if (_play_referenced) {
131 dcp->set_decode_referenced ();
133 dcp->set_forced_reduction (_dcp_decode_reduction);
136 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
137 _pieces.push_back (piece);
/* weak_ptr in the bindings so a dropped Piece does not keep itself alive
 * through the signal connections. */
139 if (decoder->video) {
140 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
143 if (decoder->audio) {
144 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
147 if (decoder->subtitle) {
148 decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
149 decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
150 decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
/* Record, per audio stream, which piece owns it and where pushing starts. */
154 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
155 if (i->content->audio) {
156 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
157 _stream_states[j] = StreamState (i, i->content->position ());
/* Trackers for the regions of the timeline with no video / no audio content. */
162 _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
163 _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
165 _last_video_time = DCPTime ();
166 _last_audio_time = DCPTime ();
167 _have_valid_pieces = true;
/* React to a property change on some playlist content.
 * The first property group changes decoding itself, so the pieces must be
 * rebuilt; the second group only changes presentation (the action lines for
 * that branch are not visible in this extract — presumably they just signal
 * Changed without invalidating pieces; TODO confirm against full source). */
171 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
173 shared_ptr<Content> c = w.lock ();
179 property == ContentProperty::POSITION ||
180 property == ContentProperty::LENGTH ||
181 property == ContentProperty::TRIM_START ||
182 property == ContentProperty::TRIM_END ||
183 property == ContentProperty::PATH ||
184 property == VideoContentProperty::FRAME_TYPE ||
185 property == DCPContentProperty::NEEDS_ASSETS ||
186 property == DCPContentProperty::NEEDS_KDM ||
187 property == SubtitleContentProperty::COLOUR ||
188 property == SubtitleContentProperty::OUTLINE ||
189 property == SubtitleContentProperty::SHADOW ||
190 property == SubtitleContentProperty::EFFECT_COLOUR ||
191 property == FFmpegContentProperty::SUBTITLE_STREAM ||
192 property == VideoContentProperty::COLOUR_CONVERSION
/* Decoding-affecting change: force setup_pieces() on next use. */
195 _have_valid_pieces = false;
/* Presentation-only properties follow. */
199 property == SubtitleContentProperty::LINE_SPACING ||
200 property == SubtitleContentProperty::OUTLINE_WIDTH ||
201 property == SubtitleContentProperty::Y_SCALE ||
202 property == SubtitleContentProperty::FADE_IN ||
203 property == SubtitleContentProperty::FADE_OUT ||
204 property == ContentProperty::VIDEO_FRAME_RATE ||
205 property == SubtitleContentProperty::USE ||
206 property == SubtitleContentProperty::X_OFFSET ||
207 property == SubtitleContentProperty::Y_OFFSET ||
208 property == SubtitleContentProperty::X_SCALE ||
209 property == SubtitleContentProperty::FONTS ||
210 property == VideoContentProperty::CROP ||
211 property == VideoContentProperty::SCALE ||
212 property == VideoContentProperty::FADE_IN ||
213 property == VideoContentProperty::FADE_OUT
/* Set the size of the "container" into which video is placed, rebuilding the
 * cached black frame to match.  No-op when the size is unchanged. */
221 Player::set_video_container_size (dcp::Size s)
223 if (s == _video_container_size) {
227 _video_container_size = s;
229 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
230 _black_image->make_black ();
/* Playlist membership changed: pieces must be rebuilt before the next pass. */
236 Player::playlist_changed ()
238 _have_valid_pieces = false;
/* React to a Film property change that affects our output. */
243 Player::film_changed (Film::Property p)
245 /* Here we should notice Film properties that affect our output, and
246 alert listeners that our output now would be different to how it was
247 last time we were run.
250 if (p == Film::CONTAINER) {
252 } else if (p == Film::VIDEO_FRAME_RATE) {
253 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
254 so we need new pieces here.
256 _have_valid_pieces = false;
258 } else if (p == Film::AUDIO_PROCESSOR) {
/* (Re-)create the audio processor clone at the film's sample rate. */
259 if (_film->audio_processor ()) {
260 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
/* Scale and position image subtitles (whose rectangles are expressed as
 * proportions of the frame) into _video_container_size pixel coordinates,
 * returning one PositionImage per input subtitle. */
266 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
268 list<PositionImage> all;
270 for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
275 /* We will scale the subtitle up to fit _video_container_size */
276 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
278 /* Then we need a corrective translation, consisting of two parts:
280 * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
281 * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
283 * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
284 * (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
285 * (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
287 * Combining these two translations gives these expressions.
/* Convert to RGB in the container's colourspace (image scale call is not
 * fully visible in this extract). */
294 dcp::YUV_TO_RGB_REC601,
295 i->image->pixel_format (),
/* Pixel position of the subtitle's top-left corner. */
300 lrint (_video_container_size.width * i->rectangle.x),
301 lrint (_video_container_size.height * i->rectangle.y)
/* Make a PlayerVideo wrapping the cached black image, sized to the container
 * and using a default (preset) colour conversion. */
310 shared_ptr<PlayerVideo>
311 Player::black_player_video_frame () const
313 return shared_ptr<PlayerVideo> (
315 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
318 _video_container_size,
319 _video_container_size,
322 PresetColourConversion::all().front().conversion
/* Convert a DCP timeline time to a video frame index within a piece's content,
 * clamping to the piece and accounting for trim and skip/repeat. */
328 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
330 DCPTime s = t - piece->content->position ();
331 s = min (piece->content->length_after_trim(), s);
332 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
334 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
335 then convert that ContentTime to frames at the content's rate. However this fails for
336 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
337 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
339 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
341 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: content video frame -> DCP timeline time,
 * clamped so it never precedes time zero. */
345 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
347 /* See comment in dcp_to_content_video */
348 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
349 return max (DCPTime (), d + piece->content->position ());
/* DCP timeline time -> audio frame index (at the film's audio rate) within a
 * piece, clamped to the piece and including trimmed-off audio at the start. */
353 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
355 DCPTime s = t - piece->content->position ();
356 s = min (piece->content->length_after_trim(), s);
357 /* See notes in dcp_to_content_video */
358 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: audio frame -> DCP timeline time. */
362 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
364 /* See comment in dcp_to_content_video */
365 return DCPTime::from_frames (f, _film->audio_frame_rate())
366 - DCPTime (piece->content->trim_start(), piece->frc)
367 + piece->content->position();
/* DCP timeline time -> ContentTime within a piece, clamped to the piece. */
371 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
373 DCPTime s = t - piece->content->position ();
374 s = min (piece->content->length_after_trim(), s);
375 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* ContentTime within a piece -> DCP timeline time, never before time zero. */
379 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
381 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by every piece of subtitle content.
 * (Rebuilds pieces first if needed; the setup_pieces() call between original
 * lines 387 and 391 is not visible in this extract.) */
384 list<shared_ptr<Font> >
385 Player::get_subtitle_fonts ()
387 if (!_have_valid_pieces) {
391 list<shared_ptr<Font> > fonts;
392 BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
393 if (p->content->subtitle) {
394 /* XXX: things may go wrong if there are duplicate font IDs
395 with different font files.
397 list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
398 copy (f.begin(), f.end(), back_inserter (fonts));
405 /** Set this player never to produce any video data */
407 Player::set_ignore_video ()
409 _ignore_video = true;
/** Set this player never to produce any subtitle data */
413 Player::set_ignore_subtitle ()
415 _ignore_subtitle = true;
418 /** Set whether or not this player should always burn text subtitles into the image,
419 * regardless of the content settings.
420 * @param burn true to always burn subtitles, false to obey content settings.
423 Player::set_always_burn_subtitles (bool burn)
425 _always_burn_subtitles = burn;
428 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the signature and `_fast = true;` lines are stripped from this
 * extract; only the piece invalidation (decoders must be re-made fast) is visible. */
433 _have_valid_pieces = false;
/* Make this player decode content that a DCP refers to (rather than skipping
 * it); pieces must be rebuilt so DCP decoders pick up the new setting. */
437 Player::set_play_referenced ()
439 _play_referenced = true;
440 _have_valid_pieces = false;
/* Gather the reel assets (picture/sound/subtitle) of every DCP content item
 * whose corresponding reference_* flag is set, adjusting each asset's entry
 * point and duration for the content's trims and computing its DCP-timeline
 * period.  NOTE(review): set_entry_point/set_duration mutate the decoder's
 * reel assets in place — acceptable here since the decoder is method-local. */
443 list<ReferencedReelAsset>
444 Player::get_reel_assets ()
446 list<ReferencedReelAsset> a;
448 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can be "referenced". */
449 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
454 scoped_ptr<DCPDecoder> decoder;
456 decoder.reset (new DCPDecoder (j, _film->log(), false));
/* `offset` (declared on a stripped line) accumulates reel lengths in frames. */
462 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
464 DCPOMATIC_ASSERT (j->video_frame_rate ());
465 double const cfr = j->video_frame_rate().get();
466 Frame const trim_start = j->trim_start().frames_round (cfr);
467 Frame const trim_end = j->trim_end().frames_round (cfr);
468 int const ffr = _film->video_frame_rate ();
470 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
471 if (j->reference_video ()) {
472 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
473 DCPOMATIC_ASSERT (ra);
474 ra->set_entry_point (ra->entry_point() + trim_start);
475 ra->set_duration (ra->duration() - trim_start - trim_end);
477 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
481 if (j->reference_audio ()) {
482 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
483 DCPOMATIC_ASSERT (ra);
484 ra->set_entry_point (ra->entry_point() + trim_start);
485 ra->set_duration (ra->duration() - trim_start - trim_end);
487 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
491 if (j->reference_subtitle ()) {
492 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
493 DCPOMATIC_ASSERT (ra);
494 ra->set_entry_point (ra->entry_point() + trim_start);
495 ra->set_duration (ra->duration() - trim_start - trim_end);
497 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
501 /* Assume that main picture duration is the length of the reel */
502 offset += k->main_picture()->duration ();
/* One step of playback: find the source (decoder, black-filler or silence-
 * filler) that is furthest behind, make it emit some data, then flush any
 * audio that is ready from the merger.  NOTE(review): the `bool Player::pass()`
 * signature and several branch/return lines are stripped from this extract. */
512 if (!_have_valid_pieces) {
516 if (_playlist->length() == DCPTime()) {
517 /* Special case of an empty Film; just give one black frame */
518 emit_video (black_player_video_frame(), DCPTime());
522 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
524 shared_ptr<Piece> earliest_content;
525 optional<DCPTime> earliest_time;
527 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
529 DCPTime const t = content_time_to_dcp (i, i->decoder->position());
530 /* Given two choices at the same time, pick the one with a subtitle so we see it before
533 if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->subtitle)) {
535 earliest_content = i;
549 if (earliest_content) {
/* Black/silent fillers win if they are behind all content. */
553 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
554 earliest_time = _black.position ();
558 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
559 earliest_time = _silent.position ();
/* CONTENT case: let the chosen decoder produce data via its signals. */
565 earliest_content->done = earliest_content->decoder->pass ();
/* BLACK case: emit one black frame and advance. */
568 emit_video (black_player_video_frame(), _black.position());
569 _black.set_position (_black.position() + one_video_frame());
/* SILENT case: fill at most one video frame's worth of silence. */
573 DCPTimePeriod period (_silent.period_at_position());
574 if (period.duration() > one_video_frame()) {
575 period.to = period.from + one_video_frame();
578 _silent.set_position (period.to);
586 /* Emit any audio that is ready */
/* Pull only up to the earliest point every live stream has pushed to, so
 * streams are merged in step. */
588 DCPTime pull_to = _film->length ();
589 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
590 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
591 pull_to = i->second.last_push_end;
595 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
596 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
597 if (_last_audio_time && i->second < *_last_audio_time) {
598 /* This new data comes before the last we emitted (or the last seek); discard it */
599 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
604 } else if (_last_audio_time && i->second > *_last_audio_time) {
605 /* There's a gap between this data and the last we emitted; fill with silence */
606 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
609 emit_audio (i->first, i->second);
/* Build the single merged subtitle image (if any) to burn into the video
 * frame at `time`: image subtitles are transformed to container coordinates
 * and text subtitles are rendered, then everything is merged. */
615 optional<PositionImage>
616 Player::subtitles_for_frame (DCPTime time) const
618 list<PositionImage> subtitles;
620 BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
622 /* Image subtitles */
623 list<PositionImage> c = transform_image_subtitles (i.image);
624 copy (c.begin(), c.end(), back_inserter (subtitles));
626 /* Text subtitles (rendered to an image) */
627 if (!i.text.empty ()) {
628 list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
629 copy (s.begin(), s.end(), back_inserter (subtitles));
633 if (subtitles.empty ()) {
634 return optional<PositionImage> ();
637 return merge (subtitles);
/* Handler for a decoder's video Data signal: convert the content frame to DCP
 * time, drop out-of-range/stale frames, fill any gap since the last emitted
 * frame, then wrap the frame in a PlayerVideo and emit it (repeating for
 * pulldown as dictated by the FrameRateChange). */
641 Player::video (weak_ptr<Piece> wp, ContentVideo video)
643 shared_ptr<Piece> piece = wp.lock ();
648 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
/* When skipping (content faster than DCP), drop every other frame. */
649 if (frc.skip && (video.frame % 2) == 1) {
653 /* Time of the first frame we will emit */
654 DCPTime const time = content_video_to_dcp (piece, video.frame);
656 /* Discard if it's outside the content's period or if it's before the last accurate seek */
658 time < piece->content->position() ||
659 time >= piece->content->end() ||
660 (_last_video_time && time < *_last_video_time)) {
664 /* Fill gaps that we discover now that we have some video which needs to be emitted */
666 if (_last_video_time) {
667 /* XXX: this may not work for 3D */
668 DCPTime fill_from = max (*_last_video_time, piece->content->position());
669 for (DCPTime j = fill_from; j < time; j += one_video_frame()) {
/* Repeat this piece's last frame if we have one, else black. */
670 LastVideoMap::const_iterator k = _last_video.find (wp);
671 if (k != _last_video.end ()) {
672 emit_video (k->second, j);
674 emit_video (black_player_video_frame(), j);
/* Build the PlayerVideo (crop/fade/scale parameters from the content). */
679 _last_video[wp].reset (
682 piece->content->video->crop (),
683 piece->content->video->fade (video.frame),
684 piece->content->video->scale().size (
685 piece->content->video, _video_container_size, _film->frame_size ()
687 _video_container_size,
690 piece->content->video->colour_conversion ()
/* Emit once per repeat (e.g. 2x for 24fps content in a 48fps DCP). */
695 for (int i = 0; i < frc.repeat; ++i) {
696 emit_video (_last_video[wp], t);
697 t += one_video_frame ();
703 /** @return Number of input frames that were `accepted'. This is the number of frames passed in
704 * unless some were discarded at the end of the block.
/* Handler for a decoder's audio Data signal: place the block on the DCP
 * timeline, trim it to the content's period, apply gain/remap/processing and
 * push it into the merger, recording how far this stream has pushed. */
707 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
709 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
711 shared_ptr<Piece> piece = wp.lock ();
716 shared_ptr<AudioContent> content = piece->content->audio;
717 DCPOMATIC_ASSERT (content);
719 /* Compute time in the DCP */
720 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
721 /* And the end of this block in the DCP */
722 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
724 /* We consider frames trimmed off the beginning to nevertheless be `accepted'; it's only frames trimmed
725 off the end that are considered as discarded. This logic is necessary to ensure correct reel lengths,
726 although the precise details escape me at the moment.
728 Frame accepted = content_audio.audio->frames();
730 /* Remove anything that comes before the start or after the end of the content */
731 if (time < piece->content->position()) {
732 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
734 /* This audio is entirely discarded */
737 content_audio.audio = cut.first;
739 } else if (time > piece->content->end()) {
742 } else if (end > piece->content->end()) {
/* Block straddles the content's end: keep only the in-range portion. */
743 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
744 if (remaining_frames == 0) {
747 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
748 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
749 content_audio.audio = cut;
750 accepted = content_audio.audio->frames();
753 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Gain (copy-on-write: never modify the decoder's buffers in place). */
757 if (content->gain() != 0) {
758 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
759 gain->apply_gain (content->gain ());
760 content_audio.audio = gain;
/* Map the stream's channels into the film's channel layout. */
765 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
769 if (_audio_processor) {
770 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
775 _audio_merger.push (content_audio.audio, time);
776 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
777 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for a subtitle decoder's ImageStart signal: apply the content's
 * offset/scale settings to the subtitle rectangle and register the subtitle
 * as active from its DCP start time. */
782 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
784 shared_ptr<Piece> piece = wp.lock ();
789 /* Apply content's subtitle offsets */
790 subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
791 subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
793 /* Apply content's subtitle scale */
794 subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
795 subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
797 /* Apply a corrective translation to keep the subtitle centred after that scale */
798 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
799 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
/* `ps` (declared on a stripped line) collects the adjusted subtitle. */
802 ps.image.push_back (subtitle.sub);
803 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
805 _active_subtitles.add_from (wp, ps, from);
/* Handler for a subtitle decoder's TextStart signal: apply the content's
 * position/scale settings to each SubtitleString and register them as active
 * from their DCP start time. */
809 Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
811 shared_ptr<Piece> piece = wp.lock ();
817 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
819 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
820 s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
821 s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
822 float const xs = piece->content->subtitle->x_scale();
823 float const ys = piece->content->subtitle->y_scale();
824 float size = s.size();
826 /* Adjust size to express the common part of the scaling;
827 e.g. if xs = ys = 0.5 we scale size by 2.
/* NOTE(review): 1 / min (1/xs, 1/ys) is algebraically max (xs, ys);
 * could be simplified (and avoids two divisions). */
829 if (xs > 1e-5 && ys > 1e-5) {
830 size *= 1 / min (1 / xs, 1 / ys);
834 /* Then express aspect ratio changes */
835 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
836 s.set_aspect_adjust (xs / ys);
/* dcp::Time wants an editable-units denominator; 1000 = milliseconds. */
839 s.set_in (dcp::Time(from.seconds(), 1000));
840 ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
841 ps.add_fonts (piece->content->subtitle->fonts ());
844 _active_subtitles.add_from (wp, ps, from);
/* Handler for a subtitle decoder's Stop signal: close the active subtitle at
 * the given content time and, if it is not being burnt in, emit it to
 * listeners as a discrete Subtitle event. */
848 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
850 if (!_active_subtitles.have (wp)) {
854 shared_ptr<Piece> piece = wp.lock ();
859 DCPTime const dcp_to = content_time_to_dcp (piece, to);
861 pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
863 if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
864 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
/* Seek the player (and all piece decoders) to `time`.  An accurate seek means
 * subsequent pre-seek data will be discarded; an inaccurate seek leaves the
 * last-time markers unset so whatever arrives next is accepted. */
869 Player::seek (DCPTime time, bool accurate)
871 if (!_have_valid_pieces) {
/* Flush any state that refers to the old position. */
875 if (_audio_processor) {
876 _audio_processor->flush ();
879 _audio_merger.clear ();
880 _active_subtitles.clear ();
882 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
883 if (time < i->content->position()) {
884 /* Before; seek to 0 */
885 i->decoder->seek (ContentTime(), accurate);
887 } else if (i->content->position() <= time && time < i->content->end()) {
888 /* During; seek to position */
889 i->decoder->seek (dcp_to_content_time (i, time), accurate);
892 /* After; this piece is done */
/* Accurate branch: remember where we are so earlier data is discarded;
 * inaccurate branch: clear the markers. */
898 _last_video_time = time;
899 _last_audio_time = time;
901 _last_video_time = optional<DCPTime>();
902 _last_audio_time = optional<DCPTime>();
905 _black.set_position (time);
906 _silent.set_position (time);
908 _last_video.clear ();
/* Attach any due subtitles to the frame, emit it (Video signal on a stripped
 * line), and advance the video clock once per complete (both-eyes) frame. */
912 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
914 optional<PositionImage> subtitles = subtitles_for_frame (time);
916 pv->set_subtitle (subtitles.get ());
/* Only advance after the right eye (or a 2D frame) so 3D pairs share a time. */
921 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
922 _last_video_time = time + one_video_frame();
923 _active_subtitles.clear_before (time);
/* Emit an audio block (Audio signal on a stripped line) and advance the audio
 * clock past it. */
928 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
931 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence to cover `period`, in blocks of at most half a second to keep
 * buffer sizes bounded.  Empty periods are a no-op. */
935 Player::fill_audio (DCPTimePeriod period)
937 if (period.from == period.to) {
941 DCPOMATIC_ASSERT (period.from < period.to);
943 DCPTime t = period.from;
944 while (t < period.to) {
945 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
946 Frame const samples = block.frames_round(_film->audio_frame_rate());
948 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
949 silence->make_silent ();
950 emit_audio (silence, t);
/* (t += block on a stripped line.) */
/* @return The duration of one video frame at the film's DCP rate. */
957 Player::one_video_frame () const
959 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Drop the part of `audio` (placed at `time`) that falls before `discard_to`.
 * @return The remaining buffers and their new start time; a null buffer if
 * everything was discarded. */
962 pair<shared_ptr<AudioBuffers>, DCPTime>
963 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
965 DCPTime const discard_time = discard_to - time;
966 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
967 Frame remaining_frames = audio->frames() - discard_frames;
968 if (remaining_frames <= 0) {
969 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
971 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
972 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
973 return make_pair(cut, time + discard_time);
/* Set an optional resolution reduction for DCP decoding (e.g. quarter-size
 * preview); pieces are rebuilt so DCP decoders pick it up.  No-op when the
 * value is unchanged. */
977 Player::set_dcp_decode_reduction (optional<int> reduction)
979 if (reduction == _dcp_decode_reduction) {
983 _dcp_decode_reduction = reduction;
984 _have_valid_pieces = false;