2 Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_subtitles.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <boost/foreach.hpp>
62 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	/* NOTE(review): lines are missing from this excerpt here — presumably the
	   first initialiser (": _film (film)") — TODO confirm against the full file.
	*/
	, _playlist (playlist)
	, _have_valid_pieces (false)
	, _ignore_video (false)
	, _ignore_subtitle (false)
	, _always_burn_subtitles (false)
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	/* Listen for changes to the film, the playlist and any content in the
	   playlist, so that our cached state can be invalidated when required.
	*/
	_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
	_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
	_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
	/* Start with the film's frame size as our video container size */
	set_video_container_size (_film->frame_size ());
	/* Sets up _audio_processor if the film has one (see film_changed) */
	film_changed (Film::AUDIO_PROCESSOR);
	/* Accurate seek to the start of the film */
	seek (DCPTime (), true);
/* Rebuild _pieces (content + decoder + frame-rate-change triples) from the
   playlist, wire up decoder signals, rebuild the per-stream audio state and
   the "black" / "silent" fillers, and reset the emission clocks.
*/
Player::setup_pieces ()
	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
		/* Skip content whose files are missing */
		if (!i->paths_valid ()) {
		shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
		FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
		/* Not something that we can decode; e.g. Atmos content */
		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore ();
		if (decoder->subtitle && _ignore_subtitle) {
			decoder->subtitle->set_ignore ();
		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp && _play_referenced) {
			/* NOTE(review): this inner check is redundant — the enclosing
			   condition has already tested _play_referenced.
			*/
			if (_play_referenced) {
				dcp->set_decode_referenced ();
			dcp->set_forced_reduction (_dcp_decode_reduction);
		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);
		/* Route decoded video/audio/subtitles into the Player, holding the
		   piece only weakly so that the connections do not keep it alive.
		*/
		if (decoder->video) {
			decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		if (decoder->subtitle) {
			decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
			decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
			decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
	/* Track, per audio stream, the piece it belongs to and its position */
	_stream_states.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
	/* Gaps in video are filled with black; gaps in audio with silence */
	_black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
	_silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
	_last_video_time = DCPTime ();
	_last_audio_time = DCPTime ();
	_have_valid_pieces = true;
/* Called when a property of some playlist content changes.
   @param w weak pointer to the content (may have expired).
   @param property identifier of the changed property.
   @param frequent true if this is one of a rapid series of changes.
   NOTE(review): the "if (" / "} else if (" framing lines around the two
   property lists below are missing from this excerpt — TODO confirm.
*/
Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
	shared_ptr<Content> c = w.lock ();
	/* First group: properties which change the set-up of the pieces, so the
	   pieces must be rebuilt before the next pass/seek.
	*/
	property == ContentProperty::POSITION ||
	property == ContentProperty::LENGTH ||
	property == ContentProperty::TRIM_START ||
	property == ContentProperty::TRIM_END ||
	property == ContentProperty::PATH ||
	property == VideoContentProperty::FRAME_TYPE ||
	property == DCPContentProperty::NEEDS_ASSETS ||
	property == DCPContentProperty::NEEDS_KDM ||
	property == SubtitleContentProperty::COLOUR ||
	property == SubtitleContentProperty::EFFECT ||
	property == SubtitleContentProperty::EFFECT_COLOUR ||
	property == FFmpegContentProperty::SUBTITLE_STREAM ||
	property == FFmpegContentProperty::FILTERS ||
	property == VideoContentProperty::COLOUR_CONVERSION
	_have_valid_pieces = false;
	/* Second group: properties which only alter the output, so no rebuild of
	   the pieces is required.
	*/
	property == SubtitleContentProperty::LINE_SPACING ||
	property == SubtitleContentProperty::OUTLINE_WIDTH ||
	property == SubtitleContentProperty::Y_SCALE ||
	property == SubtitleContentProperty::FADE_IN ||
	property == SubtitleContentProperty::FADE_OUT ||
	property == ContentProperty::VIDEO_FRAME_RATE ||
	property == SubtitleContentProperty::USE ||
	property == SubtitleContentProperty::X_OFFSET ||
	property == SubtitleContentProperty::Y_OFFSET ||
	property == SubtitleContentProperty::X_SCALE ||
	property == SubtitleContentProperty::FONTS ||
	property == VideoContentProperty::CROP ||
	property == VideoContentProperty::SCALE ||
	property == VideoContentProperty::FADE_IN ||
	property == VideoContentProperty::FADE_OUT
/* Set the size of the container that output video frames are placed into,
   recreating the cached black frame to match.  A no-op if the size is
   unchanged.
*/
Player::set_video_container_size (dcp::Size s)
	if (s == _video_container_size) {
	_video_container_size = s;
	/* Rebuild the cached black frame at the new size */
	_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
	_black_image->make_black ();
/* The playlist itself changed (content added/removed); invalidate pieces */
Player::playlist_changed ()
	_have_valid_pieces = false;
/* Called when a property of the Film has changed.
   @param p which Film property changed.
*/
Player::film_changed (Film::Property p)
	/* Here we should notice Film properties that affect our output, and
	   alert listeners that our output now would be different to how it was
	   last time we were run.
	*/
	if (p == Film::CONTAINER) {
	} else if (p == Film::VIDEO_FRAME_RATE) {
		/* Pieces contain a FrameRateChange which contains the DCP frame rate,
		   so we need new pieces here.
		*/
		_have_valid_pieces = false;
	} else if (p == Film::AUDIO_PROCESSOR) {
		/* Clone the film's audio processor at the film's audio sample rate */
		if (_film->audio_processor ()) {
			_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
/* Scale and position image subtitles so that they fit _video_container_size,
   returning one PositionImage per input subtitle.
   NOTE(review): the scale/convert call and its remaining arguments are
   missing from this excerpt — TODO confirm against the full file.
*/
Player::transform_image_subtitles (list<ImageSubtitle> subs) const
	list<PositionImage> all;
	for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
		/* We will scale the subtitle up to fit _video_container_size */
		dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
		/* Then we need a corrective translation, consisting of two parts:
		 *
		 * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
		 * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
		 *
		 * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
		 * (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
		 * (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
		 *
		 * Combining these two translations gives these expressions.
		 */
		dcp::YUV_TO_RGB_REC601,
		i->image->pixel_format (),
		lrint (_video_container_size.width * i->rectangle.x),
		lrint (_video_container_size.height * i->rectangle.y)
/* Build a PlayerVideo which shows the cached black frame at the current
   container size, using the first preset colour conversion.
   NOTE(review): some constructor arguments (e.g. the "new PlayerVideo" line
   itself) are missing from this excerpt.
*/
shared_ptr<PlayerVideo>
Player::black_player_video_frame () const
	return shared_ptr<PlayerVideo> (
		shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
		_video_container_size,
		_video_container_size,
		PresetColourConversion::all().front().conversion
/* Convert a DCP time to a frame index within a piece's video content,
   accounting for the piece's position, trim and frame-rate change.
*/
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate. However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: convert a content video frame index to
   the DCP time at which it will appear.
*/
Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
	/* See comment in dcp_to_content_video */
	DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
	return d + piece->content->position();
/* Convert a DCP time to a frame count at the film's audio sample rate,
   relative to the start of the piece's (trimmed) audio.
*/
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: convert a resampled-audio frame index
   within a piece to the DCP time at which it will be heard.
*/
Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
	/* See comment in dcp_to_content_video */
	return DCPTime::from_frames (f, _film->audio_frame_rate())
		- DCPTime (piece->content->trim_start(), piece->frc)
		+ piece->content->position();
/* Convert a DCP time to a ContentTime within a piece, clamped to the
   piece's trimmed length and never negative.
*/
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Inverse of dcp_to_content_time: convert a piece's ContentTime to DCP time,
   clamped so it is never negative.
*/
Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
	return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by all subtitle content in the playlist.
   @return concatenated list of fonts from every piece with subtitles.
*/
list<shared_ptr<Font> >
Player::get_subtitle_fonts ()
	/* Pieces must be up to date before we can walk them */
	if (!_have_valid_pieces) {
	list<shared_ptr<Font> > fonts;
	BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
		if (p->content->subtitle) {
			/* XXX: things may go wrong if there are duplicate font IDs
			   with different font files.
			*/
			list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
			copy (f.begin(), f.end(), back_inserter (fonts));
/** Set this player never to produce any video data */
Player::set_ignore_video ()
	_ignore_video = true;
/* Set this player never to produce any subtitle data */
Player::set_ignore_subtitle ()
	_ignore_subtitle = true;
/** Set whether or not this player should always burn text subtitles into the image,
 *  regardless of the content settings.
 *  @param burn true to always burn subtitles, false to obey content settings.
Player::set_always_burn_subtitles (bool burn)
	_always_burn_subtitles = burn;
/** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function signature (presumably Player::set_fast) and the
   statement setting the fast flag are missing from this excerpt; only the
   piece invalidation is visible.
*/
	_have_valid_pieces = false;
/* Make the player decode content that would otherwise be referenced from an
   existing DCP; pieces must be rebuilt to pick this up.
*/
Player::set_play_referenced ()
	_play_referenced = true;
	_have_valid_pieces = false;
/* Collect the reel assets (picture/sound/subtitle) which this film
   references from other DCPs rather than re-encoding, with the content's
   trims applied to each asset's entry point and duration.
   @return one ReferencedReelAsset per referenced asset, with its DCP period.
*/
list<ReferencedReelAsset>
Player::get_reel_assets ()
	list<ReferencedReelAsset> a;
	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
		/* Only DCP content can reference reel assets */
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		scoped_ptr<DCPDecoder> decoder;
		decoder.reset (new DCPDecoder (j, _film->log(), false));
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
			DCPOMATIC_ASSERT (j->video_frame_rate ());
			/* Express the content's trims as frame counts at the content rate */
			double const cfr = j->video_frame_rate().get();
			Frame const trim_start = j->trim_start().frames_round (cfr);
			Frame const trim_end = j->trim_end().frames_round (cfr);
			int const ffr = _film->video_frame_rate ();
			DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
			if (j->reference_video ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
				DCPOMATIC_ASSERT (ra);
				/* Apply the trims to the asset's playable extent */
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
			if (j->reference_audio ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
			if (j->reference_subtitle ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
			/* Assume that main picture duration is the length of the reel */
			offset += k->main_picture()->duration ();
	/* NOTE(review): the enclosing function's signature is missing from this
	   excerpt — the logic below appears to be the interior of Player::pass(),
	   which drives one step of decoding/emission; TODO confirm.
	*/
	if (!_have_valid_pieces) {
	if (_playlist->length() == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(), DCPTime());
	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */
	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		DCPTime const t = content_time_to_dcp (i, i->decoder->position());
		/* Decoder has passed the end of its content */
		if (t > i->content->end()) {
		/* Given two choices at the same time, pick the one with a subtitle so we see it before
		if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->subtitle)) {
			earliest_content = i;
	if (earliest_content) {
	/* A pending black (video gap) earlier than any content wins */
	if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
	/* Likewise a pending silence (audio gap) */
	if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		earliest_content->done = earliest_content->decoder->pass ();
		emit_video (black_player_video_frame(), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		/* Emit at most one video frame's worth of silence at a time */
		DCPTimePeriod period (_silent.period_at_position());
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		_silent.set_position (period.to);
	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _film->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		emit_audio (i->first, i->second);
/* Gather all subtitles (image and rendered text) which should be burnt into
   the video frame at the given time.
   @return merged image, or none if there are no subtitles to burn.
*/
optional<PositionImage>
Player::subtitles_for_frame (DCPTime time) const
	list<PositionImage> subtitles;
	/* Consider subtitles active during this one video frame */
	BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (DCPTimePeriod(time, time + DCPTime::from_frames(1, _film->video_frame_rate())), _always_burn_subtitles)) {
		/* Image subtitles */
		list<PositionImage> c = transform_image_subtitles (i.image);
		copy (c.begin(), c.end(), back_inserter (subtitles));
		/* Text subtitles (rendered to an image) */
		if (!i.text.empty ()) {
			list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
			copy (s.begin(), s.end(), back_inserter (subtitles));
	if (subtitles.empty ()) {
		return optional<PositionImage> ();
	return merge (subtitles);
/* Handle a frame of video arriving from a piece's decoder: place it at its
   DCP time, fill any preceding gap, and emit it (repeated as required by the
   frame-rate change).
   @param wp the piece this video came from (weak; may have expired).
   @param video the decoded frame.
*/
Player::video (weak_ptr<Piece> wp, ContentVideo video)
	shared_ptr<Piece> piece = wp.lock ();
	FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
	/* When skipping, drop every other source frame */
	if (frc.skip && (video.frame % 2) == 1) {
	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	/* Discard if it's outside the content's period or if it's before the last accurate seek */
	time < piece->content->position() ||
	time >= piece->content->end() ||
	(_last_video_time && time < *_last_video_time)) {
	/* Fill gaps that we discover now that we have some video which needs to be emitted */
	if (_last_video_time) {
		/* XXX: this may not work for 3D */
		DCPTime fill_from = max (*_last_video_time, piece->content->position());
		for (DCPTime j = fill_from; j < time; j += one_video_frame()) {
			/* Repeat this piece's last frame if we have one, else black */
			LastVideoMap::const_iterator k = _last_video.find (wp);
			if (k != _last_video.end ()) {
				emit_video (k->second, j);
				emit_video (black_player_video_frame(), j);
	/* Build the PlayerVideo for this frame (some constructor arguments are
	   missing from this excerpt) and remember it for gap-filling.
	*/
	_last_video[wp].reset (
		piece->content->video->crop (),
		piece->content->video->fade (video.frame),
		piece->content->video->scale().size (
			piece->content->video, _video_container_size, _film->frame_size ()
		_video_container_size,
		piece->content->video->colour_conversion ()
	/* Emit the frame once per repeat required by the frame-rate change */
	for (int i = 0; i < frc.repeat; ++i) {
		emit_video (_last_video[wp], t);
		t += one_video_frame ();
/* Handle a block of audio arriving from a piece's decoder: trim it to the
   content's period, apply gain/remap/processing and push it to the merger.
   @param wp the piece this audio came from (weak; may have expired).
   @param stream which of the content's audio streams it belongs to.
   @param content_audio the decoded samples and their frame position.
*/
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);
	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		/* This audio is entirely discarded */
		content_audio.audio = cut.first;
	} else if (time > piece->content->end()) {
	} else if (end > piece->content->end()) {
		/* Block overlaps the end of the content; keep only the frames inside */
		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
		if (remaining_frames == 0) {
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
	/* Apply the content's gain, if any */
	if (content->gain() != 0) {
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	/* Remap the stream's channels into the film's channel layout */
	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
	/* Run the film's audio processor, if there is one */
	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	/* Push to the merger and record how far this stream has got */
	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handle the start of an image subtitle from a piece's decoder: apply the
   content's offsets and scales, then record it as active from its DCP time.
*/
Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
	shared_ptr<Piece> piece = wp.lock ();
	/* Apply content's subtitle offsets */
	subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
	subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
	/* Apply content's subtitle scale */
	subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
	subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
	/* Apply a corrective translation to keep the subtitle centred after that scale */
	subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
	subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
	/* NOTE(review): the declaration of `ps` is missing from this excerpt */
	ps.image.push_back (subtitle.sub);
	DCPTime from (content_time_to_dcp (piece, subtitle.from()));
	_active_subtitles.add_from (wp, ps, from);
/* Handle the start of a text subtitle from a piece's decoder: apply the
   content's offsets, scales and fonts, then record it as active from its
   DCP time.
*/
Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
	shared_ptr<Piece> piece = wp.lock ();
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		/* Apply the content's position offsets */
		s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
		s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
		float const xs = piece->content->subtitle->x_scale();
		float const ys = piece->content->subtitle->y_scale();
		float size = s.size();
		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we scale size by 2.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		/* Stamp the subtitle's start time (1000 ticks-per-second time) */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
		ps.add_fonts (piece->content->subtitle->fonts ());
	_active_subtitles.add_from (wp, ps, from);
/* Handle the end of a subtitle from a piece's decoder: close the active
   subtitle at the corresponding DCP time, and emit it to listeners if it is
   not being burnt into the image.
*/
Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
	/* Nothing to do if we have no active subtitle for this piece */
	if (!_active_subtitles.have (wp)) {
	shared_ptr<Piece> piece = wp.lock ();
	DCPTime const dcp_to = content_time_to_dcp (piece, to);
	pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
	/* Only emit via the Subtitle signal when the subtitle is in use and not burnt in */
	if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
		Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
/* Seek the player to a DCP time.
   @param time target time.
   @param accurate true to seek precisely (decoders will discard up to the
   exact time); false for a fast, approximate seek.
*/
Player::seek (DCPTime time, bool accurate)
	if (!_have_valid_pieces) {
	/* Drop any buffered processor/merger/subtitle state from before the seek */
	if (_audio_processor) {
		_audio_processor->flush ();
	_audio_merger.clear ();
	_active_subtitles.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to 0 */
			i->decoder->seek (ContentTime(), accurate);
		} else if (i->content->position() <= time && time < i->content->end()) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			/* After; this piece is done */
	/* NOTE(review): the if/else framing around these two pairs of assignments
	   (accurate vs. not) is missing from this excerpt.
	*/
	_last_video_time = time;
	_last_audio_time = time;
	_last_video_time = optional<DCPTime>();
	_last_audio_time = optional<DCPTime>();
	/* Restart the black/silence fillers from the seek point */
	_black.set_position (time);
	_silent.set_position (time);
	_last_video.clear ();
/* Emit one video frame, burning in any subtitles active at its time, and
   advance the video clock.
*/
Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
	optional<PositionImage> subtitles = subtitles_for_frame (time);
	pv->set_subtitle (subtitles.get ());
	/* Only advance the clock once per 3D frame pair (i.e. not on the left eye) */
	if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
		_last_video_time = time + one_video_frame();
		_active_subtitles.clear_before (time);
/* Emit a block of audio and advance the audio clock by its length */
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
	/* This audio must follow on from the previous */
	DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
	_last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence to cover the given period, in blocks of at most half a second */
Player::fill_audio (DCPTimePeriod period)
	/* Empty period: nothing to do */
	if (period.from == period.to) {
	DCPOMATIC_ASSERT (period.from < period.to);
	DCPTime t = period.from;
	while (t < period.to) {
		/* Cap each block at 0.5s so allocations stay small */
		DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
		Frame const samples = block.frames_round(_film->audio_frame_rate());
		shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
		silence->make_silent ();
		emit_audio (silence, t);
/* The duration of one video frame at the film's video frame rate */
Player::one_video_frame () const
	return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Drop the part of an audio block which falls before discard_to.
   @param audio the samples, positioned at `time` in the DCP.
   @param discard_to discard everything before this time.
   @return the remaining samples and their new start time; a null buffer if
   nothing remains.
*/
pair<shared_ptr<AudioBuffers>, DCPTime>
Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
	DCPTime const discard_time = discard_to - time;
	Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
	Frame remaining_frames = audio->frames() - discard_frames;
	/* Everything discarded: return an empty result */
	if (remaining_frames <= 0) {
		return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
	shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
	cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
	return make_pair(cut, time + discard_time);
/* Set a resolution reduction for decoding DCP content (e.g. decode at a
   lower JPEG2000 level); a no-op if unchanged, otherwise pieces are rebuilt.
*/
Player::set_dcp_decode_reduction (optional<int> reduction)
	if (reduction == _dcp_decode_reduction) {
	_dcp_decode_reduction = reduction;
	_have_valid_pieces = false;