2 Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_subtitles.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <boost/foreach.hpp>
62 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
/* Construct a Player that feeds decoded video/audio/subtitles from @playlist
   (belonging to @film) to connected listeners.  Wires up change signals so the
   player can invalidate its pieces when the film/playlist changes, then seeks
   accurately to time zero so the first pass() starts from the beginning.
   NOTE(review): some member initializers (e.g. _film) fall outside this
   extract — the visible list is incomplete. */
80 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
82 	, _playlist (playlist)
/* _pieces must be rebuilt before first use */
83 	, _have_valid_pieces (false)
84 	, _ignore_video (false)
85 	, _ignore_subtitle (false)
86 	, _always_burn_subtitles (false)
88 	, _play_referenced (false)
/* the merger collects pushed audio and emits it at the film's audio rate */
89 	, _audio_merger (_film->audio_frame_rate())
/* invalidate / rebuild state when the film or playlist content changes */
91 	_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
92 	_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
93 	_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
94 	set_video_container_size (_film->frame_size ());
/* pick up the film's audio processor (film_changed handles AUDIO_PROCESSOR) */
96 	film_changed (Film::AUDIO_PROCESSOR);
/* accurate seek to the start so decoding begins at time zero */
98 	seek (DCPTime (), true);
/* (Re)build the list of Pieces (content + decoder + frame-rate change) from the
   playlist, connect decoder signals to the Player's handlers, and reset the
   black/silent "Empty" trackers and emission clocks.  Called lazily whenever
   _have_valid_pieces is false. */
102 Player::setup_pieces ()
106 	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* skip content whose files are missing/invalid on disk */
108 		if (!i->paths_valid ()) {
112 		shared_ptr<Decoder> decoder = decoder_factory (i, _film->log());
113 		FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
116 			/* Not something that we can decode; e.g. Atmos content */
/* honour the ignore flags set by set_ignore_video()/set_ignore_subtitle() */
120 		if (decoder->video && _ignore_video) {
121 			decoder->video->set_ignore ();
124 		if (decoder->subtitle && _ignore_subtitle) {
125 			decoder->subtitle->set_ignore ();
/* DCP content that is normally "referenced" must actually be decoded when
   we have been asked to play referenced assets */
128 		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
129 		if (dcp && _play_referenced) {
130 			dcp->set_decode_referenced ();
133 		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
134 		_pieces.push_back (piece);
/* connect decoder emissions to this player's handlers; weak_ptr<Piece>
   avoids keeping a piece alive through the signal connection */
136 		if (decoder->video) {
137 			decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
140 		if (decoder->audio) {
141 			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
144 		if (decoder->subtitle) {
145 			decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
146 			decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
147 			decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
/* record per-audio-stream state, starting at the owning content's position */
151 	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
152 		if (i->content->audio) {
153 			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
154 				_stream_states[j] = StreamState (i, i->content->position ());
/* _black / _silent track the periods with no video / no audio content, which
   pass() fills with black frames and silence respectively */
159 	_black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
160 	_silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
161 
162 	_last_video_time = DCPTime ();
163 	_last_audio_time = DCPTime ();
164 	_have_valid_pieces = true;
/* Handle a change to a property of some playlist content.  Properties that
   affect how pieces are built invalidate _pieces; the second group (visible
   here) only changes output appearance/timing and — presumably, in lines not
   visible in this extract — triggers a Changed notification without a rebuild.
   @w        content that changed (may have been deleted; hence weak_ptr)
   @property which property changed
   @frequent true for rapid repeated changes (e.g. slider drags) */
168 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
170 	shared_ptr<Content> c = w.lock ();
/* these properties require the pieces (decoders etc.) to be rebuilt */
176 		property == ContentProperty::POSITION ||
177 		property == ContentProperty::LENGTH ||
178 		property == ContentProperty::TRIM_START ||
179 		property == ContentProperty::TRIM_END ||
180 		property == ContentProperty::PATH ||
181 		property == VideoContentProperty::FRAME_TYPE ||
182 		property == DCPContentProperty::NEEDS_ASSETS ||
183 		property == DCPContentProperty::NEEDS_KDM ||
184 		property == SubtitleContentProperty::COLOUR ||
185 		property == SubtitleContentProperty::OUTLINE ||
186 		property == SubtitleContentProperty::SHADOW ||
187 		property == SubtitleContentProperty::EFFECT_COLOUR ||
188 		property == FFmpegContentProperty::SUBTITLE_STREAM ||
189 		property == VideoContentProperty::COLOUR_CONVERSION
192 		_have_valid_pieces = false;
/* these properties change the output but do not require new pieces */
196 		property == SubtitleContentProperty::LINE_SPACING ||
197 		property == SubtitleContentProperty::OUTLINE_WIDTH ||
198 		property == SubtitleContentProperty::Y_SCALE ||
199 		property == SubtitleContentProperty::FADE_IN ||
200 		property == SubtitleContentProperty::FADE_OUT ||
201 		property == ContentProperty::VIDEO_FRAME_RATE ||
202 		property == SubtitleContentProperty::USE ||
203 		property == SubtitleContentProperty::X_OFFSET ||
204 		property == SubtitleContentProperty::Y_OFFSET ||
205 		property == SubtitleContentProperty::X_SCALE ||
206 		property == SubtitleContentProperty::FONTS ||
207 		property == VideoContentProperty::CROP ||
208 		property == VideoContentProperty::SCALE ||
209 		property == VideoContentProperty::FADE_IN ||
210 		property == VideoContentProperty::FADE_OUT
/* Set the size of the "container" into which all output video is placed, and
   rebuild the cached black frame to match.  No-op if the size is unchanged. */
218 Player::set_video_container_size (dcp::Size s)
220 	if (s == _video_container_size) {
224 	_video_container_size = s;
/* re-create the pre-made black frame at the new size */
226 	_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
227 	_black_image->make_black ();
/* The playlist's content list changed: invalidate our pieces so they are
   rebuilt on the next use. */
233 Player::playlist_changed ()
235 	_have_valid_pieces = false;
/* React to a change in a Film property that affects our output.
   @p the property that changed. */
240 Player::film_changed (Film::Property p)
242 	/* Here we should notice Film properties that affect our output, and
243 	   alert listeners that our output now would be different to how it was
244 	   last time we were run.
247 	if (p == Film::CONTAINER) {
249 	} else if (p == Film::VIDEO_FRAME_RATE) {
250 		/* Pieces contain a FrameRateChange which contains the DCP frame rate,
251 		   so we need new pieces here.
253 		_have_valid_pieces = false;
255 	} else if (p == Film::AUDIO_PROCESSOR) {
/* clone the film's processor at our audio rate so we can run it locally */
256 		if (_film->audio_processor ()) {
257 			_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
/* Convert a list of ImageSubtitles into PositionImages scaled and positioned
   for the current _video_container_size (subtitle rectangles are fractional,
   so they are multiplied up to container pixels here).
   NOTE(review): the scaling call itself falls outside this extract; only the
   size/position computation is visible. */
263 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
265 	list<PositionImage> all;
267 	for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
272 		/* We will scale the subtitle up to fit _video_container_size */
273 		dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
275 		/* Then we need a corrective translation, consisting of two parts:
277 		 * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
278 		 *    rect.x * _video_container_size.width and rect.y * _video_container_size.height.
280 		 * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
281 		 *    (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
282 		 *    (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
284 		 * Combining these two translations gives these expressions.
/* arguments to the (not fully visible) scale/convert call */
291 				dcp::YUV_TO_RGB_REC601,
292 				i->image->pixel_format (),
/* fractional rectangle origin converted to container pixels */
297 				lrint (_video_container_size.width * i->rectangle.x),
298 				lrint (_video_container_size.height * i->rectangle.y)
/** @return a PlayerVideo wrapping the cached black image, sized to the video
 *  container, used to fill periods where there is no video content. */
307 shared_ptr<PlayerVideo>
308 Player::black_player_video_frame () const
310 	return shared_ptr<PlayerVideo> (
/* wrap the pre-made _black_image without copying pixel data */
312 			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
/* inside size and out size are both the container: no scaling needed */
315 			_video_container_size,
316 			_video_container_size,
/* any conversion will do for an all-black frame */
319 			PresetColourConversion::all().front().conversion
/* Convert a DCP time @t to a video frame index within @piece's content,
   accounting for the content's position, trim and frame-rate change. */
325 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
327 	DCPTime s = t - piece->content->position ();
/* clamp into the content's (trimmed) extent, then add the trim back on */
328 	s = min (piece->content->length_after_trim(), s);
329 	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
331 	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
332 	   then convert that ContentTime to frames at the content's rate.  However this fails for
333 	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
334 	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
336 	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
338 	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video(): convert content video frame @f of @piece
   to the DCP time at which it should appear, clamped to be non-negative. */
342 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
344 	/* See comment in dcp_to_content_video */
345 	DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
346 	return max (DCPTime (), d + piece->content->position ());
/* Convert a DCP time @t to a frame count at the film's (resampled) audio rate
   within @piece's content, accounting for position and start trim. */
350 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
352 	DCPTime s = t - piece->content->position ();
353 	s = min (piece->content->length_after_trim(), s);
354 	/* See notes in dcp_to_content_video */
355 	return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio(): convert resampled audio frame @f of
   @piece to DCP time. */
359 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
361 	/* See comment in dcp_to_content_video */
362 	return DCPTime::from_frames (f, _film->audio_frame_rate())
363 		- DCPTime (piece->content->trim_start(), piece->frc)
364 		+ piece->content->position();
/* Convert a DCP time @t to a ContentTime within @piece, clamped to the
   content's extent and with start trim added back. */
368 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
370 	DCPTime s = t - piece->content->position ();
371 	s = min (piece->content->length_after_trim(), s);
372 	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Inverse of dcp_to_content_time(): convert ContentTime @t in @piece to a
   non-negative DCP time. */
376 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
378 	return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/** Collect the fonts declared by every piece of subtitle content.
 *  @return concatenated list of fonts (duplicates are not removed). */
381 list<shared_ptr<Font> >
382 Player::get_subtitle_fonts ()
/* pieces must be valid before we can walk them */
384 	if (!_have_valid_pieces) {
388 	list<shared_ptr<Font> > fonts;
389 	BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
390 		if (p->content->subtitle) {
391 			/* XXX: things may go wrong if there are duplicate font IDs
392 			   with different font files.
394 			list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
395 			copy (f.begin(), f.end(), back_inserter (fonts));
402 /** Set this player never to produce any video data */
404 Player::set_ignore_video ()
/* takes effect when pieces are (re)built in setup_pieces() */
406 	_ignore_video = true;
/** Set this player never to produce any subtitle data; takes effect when
 *  pieces are (re)built in setup_pieces(). */
410 Player::set_ignore_subtitle ()
412 	_ignore_subtitle = true;
415 /** Set whether or not this player should always burn text subtitles into the image,
416  *  regardless of the content settings.
417  *  @param burn true to always burn subtitles, false to obey content settings.
420 Player::set_always_burn_subtitles (bool burn)
422 	_always_burn_subtitles = burn;
/* NOTE(review): the line below belongs to a following setter whose signature
   is missing from this extract (it invalidates the pieces) — confirm against
   the full source. */
429 	_have_valid_pieces = false;
/** Ask the player to actually decode and play content that would normally be
 *  "referenced" from an existing DCP; invalidates pieces so decoders are
 *  rebuilt with set_decode_referenced() applied. */
433 Player::set_play_referenced ()
435 	_play_referenced = true;
436 	_have_valid_pieces = false;
/** Build the list of reel assets (picture/sound/subtitle) that are referenced
 *  from existing DCPs rather than re-encoded, with entry points and durations
 *  adjusted for the content's trim, and each asset's DCP period computed from
 *  the content's position. */
439 list<ReferencedReelAsset>
440 Player::get_reel_assets ()
442 	list<ReferencedReelAsset> a;
444 	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* only DCP content can reference reel assets */
445 		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
450 		scoped_ptr<DCPDecoder> decoder;
452 			decoder.reset (new DCPDecoder (j, _film->log()));
458 		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
/* trim expressed in frames at the content's own (cfr) rate */
460 			DCPOMATIC_ASSERT (j->video_frame_rate ());
461 			double const cfr = j->video_frame_rate().get();
462 			Frame const trim_start = j->trim_start().frames_round (cfr);
463 			Frame const trim_end = j->trim_end().frames_round (cfr);
464 			int const ffr = _film->video_frame_rate ();
/* where this reel starts in the DCP; `offset` accumulates reel lengths */
466 			DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
467 			if (j->reference_video ()) {
468 				shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
469 				DCPOMATIC_ASSERT (ra);
/* apply trim to the asset's own entry point / duration */
470 				ra->set_entry_point (ra->entry_point() + trim_start);
471 				ra->set_duration (ra->duration() - trim_start - trim_end);
473 					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
477 			if (j->reference_audio ()) {
478 				shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
479 				DCPOMATIC_ASSERT (ra);
480 				ra->set_entry_point (ra->entry_point() + trim_start);
481 				ra->set_duration (ra->duration() - trim_start - trim_end);
483 					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
487 			if (j->reference_subtitle ()) {
488 				shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
489 				DCPOMATIC_ASSERT (ra);
490 				ra->set_entry_point (ra->entry_point() + trim_start);
491 				ra->set_duration (ra->duration() - trim_start - trim_end);
493 					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
497 			/* Assume that main picture duration is the length of the reel */
498 			offset += k->main_picture()->duration ();
/* NOTE(review): this is the body of Player::pass() — its signature and return
   type fall outside this extract.  One call makes the most-behind source
   (a decoder, the black filler or the silence filler) emit some data, then
   pulls any ready audio out of the merger and emits it. */
508 	if (!_have_valid_pieces) {
/* an empty film still produces one black frame */
512 	if (_playlist->length() == DCPTime()) {
513 		/* Special case of an empty Film; just give one black frame */
514 		emit_video (black_player_video_frame(), DCPTime());
518 	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */
520 	shared_ptr<Piece> earliest_content;
521 	optional<DCPTime> earliest_time;
523 	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
525 		DCPTime const t = content_time_to_dcp (i, i->decoder->position());
526 		/* Given two choices at the same time, pick the one with a subtitle so we see it before
529 		if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->subtitle)) {
531 			earliest_content = i;
/* decide between content, black filler and silence filler: whichever has the
   earliest position gets to emit */
545 	if (earliest_content) {
549 	if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
550 		earliest_time = _black.position ();
554 	if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
555 		earliest_time = _silent.position ();
/* CONTENT case: let the decoder run; it reports whether it has finished */
561 		earliest_content->done = earliest_content->decoder->pass ();
/* BLACK case: emit one black frame and advance */
564 		emit_video (black_player_video_frame(), _black.position());
565 		_black.set_position (_black.position() + one_video_frame());
/* SILENT case: fill at most one video frame's worth of silence */
569 		DCPTimePeriod period (_silent.period_at_position());
570 		if (period.duration() > one_video_frame()) {
571 			period.to = period.from + one_video_frame();
574 		_silent.set_position (period.to);
582 	/* Emit any audio that is ready */
/* only pull audio up to the point every live stream has pushed to, so the
   merger never emits data that a lagging stream could still contribute to */
584 	DCPTime pull_to = _film->length ();
585 	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
586 		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
587 			pull_to = i->second.last_push_end;
591 	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
592 	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
593 		if (_last_audio_time && i->second < *_last_audio_time) {
594 			/* There has been an accurate seek and we have received some audio before the seek time;
597 			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
604 		emit_audio (i->first, i->second);
/** Gather all subtitles (image and rendered text) that should be burnt into
 *  the frame at @time, merged into a single image.
 *  @return merged image, or none if there are no subtitles to burn. */
610 optional<PositionImage>
611 Player::subtitles_for_frame (DCPTime time) const
613 	list<PositionImage> subtitles;
615 	BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
617 		/* Image subtitles */
618 		list<PositionImage> c = transform_image_subtitles (i.image);
619 		copy (c.begin(), c.end(), back_inserter (subtitles));
621 		/* Text subtitles (rendered to an image) */
622 		if (!i.text.empty ()) {
623 			list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
624 			copy (s.begin(), s.end(), back_inserter (subtitles));
628 	if (subtitles.empty ()) {
629 		return optional<PositionImage> ();
632 	return merge (subtitles);
/* Handler for video data emitted by a piece's decoder: converts it to DCP
   time, discards out-of-range frames, fills any gap since the last emitted
   frame, wraps the frame in a PlayerVideo and emits it (repeating for
   frame-rate repeat).
   @wp    the piece that produced the video (weak: may be gone)
   @video the decoded frame. */
636 Player::video (weak_ptr<Piece> wp, ContentVideo video)
638 	shared_ptr<Piece> piece = wp.lock ();
/* when skipping (content faster than DCP) drop every other frame */
643 	FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
644 	if (frc.skip && (video.frame % 2) == 1) {
648 	/* Time of the first frame we will emit */
649 	DCPTime const time = content_video_to_dcp (piece, video.frame);
651 	/* Discard if it's outside the content's period or if it's before the last accurate seek */
653 		time < piece->content->position() ||
654 		time >= piece->content->end() ||
655 		(_last_video_time && time < *_last_video_time)) {
659 	/* Fill gaps that we discover now that we have some video which needs to be emitted */
661 	if (_last_video_time) {
662 		/* XXX: this may not work for 3D */
663 		DCPTime fill_from = max (*_last_video_time, piece->content->position());
664 		for (DCPTime j = fill_from; j < time; j += one_video_frame()) {
/* repeat this piece's last frame if we have one, otherwise black */
665 			LastVideoMap::const_iterator k = _last_video.find (wp);
666 			if (k != _last_video.end ()) {
667 				emit_video (k->second, j);
669 				emit_video (black_player_video_frame(), j);
/* build the PlayerVideo (crop/fade/scale/colour) and remember it per piece */
674 	_last_video[wp].reset (
677 			piece->content->video->crop (),
678 			piece->content->video->fade (video.frame),
679 			piece->content->video->scale().size (
680 				piece->content->video, _video_container_size, _film->frame_size ()
682 			_video_container_size,
685 			piece->content->video->colour_conversion ()
/* emit once per repeat required by the frame-rate change */
690 	for (int i = 0; i < frc.repeat; ++i) {
691 		emit_video (_last_video[wp], t);
692 		t += one_video_frame ();
/* Handler for audio data emitted by a piece's decoder: trims it to the
   content's period, applies gain/remap/processing, then pushes it into the
   audio merger and records how far this stream has pushed.
   @wp            the piece that produced the audio (weak: may be gone)
   @stream        which of the content's streams this data is from
   @content_audio the decoded audio plus its frame position. */
697 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
699 	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
701 	shared_ptr<Piece> piece = wp.lock ();
706 	shared_ptr<AudioContent> content = piece->content->audio;
707 	DCPOMATIC_ASSERT (content);
709 	/* Compute time in the DCP */
710 	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
711 	/* And the end of this block in the DCP */
712 	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
714 	/* Remove anything that comes before the start or after the end of the content */
715 	if (time < piece->content->position()) {
716 		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
718 			/* This audio is entirely discarded */
721 		content_audio.audio = cut.first;
/* whole block is after the content's end: drop it */
723 	} else if (time > piece->content->end()) {
/* block straddles the content's end: keep only the overlapping frames */
726 	} else if (end > piece->content->end()) {
727 		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
728 		if (remaining_frames == 0) {
731 		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
732 		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
733 		content_audio.audio = cut;
736 	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* apply the content's gain setting (dB) on a copy */
740 	if (content->gain() != 0) {
741 		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
742 		gain->apply_gain (content->gain ());
743 		content_audio.audio = gain;
/* remap the stream's channels into the film's channel layout */
748 	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
752 	if (_audio_processor) {
753 		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* hand to the merger; record the push point so pass() knows how far this
   stream has got */
758 	_audio_merger.push (content_audio.audio, time);
759 	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
760 	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for the start of an image subtitle: apply the content's offset and
   scale settings to the subtitle rectangle, then register it as active from
   the corresponding DCP time. */
764 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
766 	shared_ptr<Piece> piece = wp.lock ();
771 	/* Apply content's subtitle offsets */
772 	subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
773 	subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
775 	/* Apply content's subtitle scale */
776 	subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
777 	subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
779 	/* Apply a corrective translation to keep the subtitle centred after that scale */
780 	subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
781 	subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
784 	ps.image.push_back (subtitle.sub);
785 	DCPTime from (content_time_to_dcp (piece, subtitle.from()));
787 	_active_subtitles.add_from (wp, ps, from);
/* Handler for the start of a text subtitle: apply the content's offset, scale
   and outline settings to each SubtitleString, then register the result as
   active from the corresponding DCP time. */
791 Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
793 	shared_ptr<Piece> piece = wp.lock ();
799 	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
801 	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
802 		s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
803 		s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
804 		float const xs = piece->content->subtitle->x_scale();
805 		float const ys = piece->content->subtitle->y_scale();
806 		float size = s.size();
808 		/* Adjust size to express the common part of the scaling;
809 		   e.g. if xs = ys = 0.5 we scale size by 2.
811 		if (xs > 1e-5 && ys > 1e-5) {
812 			size *= 1 / min (1 / xs, 1 / ys);
816 		/* Then express aspect ratio changes */
817 		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
818 			s.set_aspect_adjust (xs / ys);
/* absolute `in` time in the DCP, with 1000-unit editable-rate fraction */
821 		s.set_in (dcp::Time(from.seconds(), 1000));
822 		ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
823 		ps.add_fonts (piece->content->subtitle->fonts ());
826 	_active_subtitles.add_from (wp, ps, from);
/* Handler for the end of a subtitle: close off the active subtitle for this
   piece at the corresponding DCP time, and — if the subtitle is in use but not
   being burnt in — emit it via the Subtitle signal for separate handling. */
830 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
/* ignore stops for subtitles we never saw start */
832 	if (!_active_subtitles.have (wp)) {
836 	shared_ptr<Piece> piece = wp.lock ();
841 	DCPTime const dcp_to = content_time_to_dcp (piece, to);
843 	pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
845 	if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
846 		Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
/* Seek the player to @time.
   @param accurate true to get frames exactly at @time (setting the emission
   clocks so earlier data is discarded); false for a rough seek, clearing
   the clocks. */
851 Player::seek (DCPTime time, bool accurate)
/* flush any audio held inside the processor before moving */
853 	if (_audio_processor) {
854 		_audio_processor->flush ();
/* drop pending merged audio and active subtitles from the old position */
857 	_audio_merger.clear ();
858 	_active_subtitles.clear ();
860 	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
861 		if (time < i->content->position()) {
862 			/* Before; seek to 0 */
863 			i->decoder->seek (ContentTime(), accurate);
865 		} else if (i->content->position() <= time && time < i->content->end()) {
866 			/* During; seek to position */
867 			i->decoder->seek (dcp_to_content_time (i, time), accurate);
870 			/* After; this piece is done */
/* accurate: remember the target so earlier frames/audio are discarded */
876 		_last_video_time = time;
877 		_last_audio_time = time;
/* inaccurate: no discard point */
879 		_last_video_time = optional<DCPTime>();
880 		_last_audio_time = optional<DCPTime>();
/* move the black/silence fillers to the new position too */
883 	_black.set_position (time);
884 	_silent.set_position (time);
/* forget remembered last frames so gaps are filled with black after a seek */
886 	_last_video.clear ();
/* Emit a single video frame at @time, burning in any subtitles that apply,
   and advance the video clock / expire old subtitles once both eyes (or the
   single 2D frame) have been emitted. */
890 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
892 	optional<PositionImage> subtitles = subtitles_for_frame (time);
894 		pv->set_subtitle (subtitles.get ());
/* only advance after the right eye (or a 2D frame), so L/R pairs share a time */
899 	if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
900 		_last_video_time = time + one_video_frame();
901 		_active_subtitles.clear_before (time);
/* Emit a block of audio at @time and advance the audio clock past it. */
906 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
909 	_last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence to cover @period, in blocks of at most half a second so no
   single allocation is too large. */
913 Player::fill_audio (DCPTimePeriod period)
/* nothing to do for an empty period */
915 	if (period.from == period.to) {
919 	DCPOMATIC_ASSERT (period.from < period.to);
921 	DCPTime t = period.from;
922 	while (t < period.to) {
923 		DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
924 		Frame const samples = block.frames_round(_film->audio_frame_rate());
926 			shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
927 			silence->make_silent ();
928 			emit_audio (silence, t);
/** @return the DCP duration of one video frame at the film's frame rate. */
935 Player::one_video_frame () const
937 	return DCPTime::from_frames (1, _film->video_frame_rate ());
/** Discard the part of @audio (starting at @time) that falls before
 *  @discard_to.
 *  @return the remaining audio and its new start time; a null buffer if
 *  everything was discarded. */
940 pair<shared_ptr<AudioBuffers>, DCPTime>
941 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
943 	DCPTime const discard_time = discard_to - time;
944 	Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
945 	Frame remaining_frames = audio->frames() - discard_frames;
/* whole buffer discarded: signal with a null pointer */
946 	if (remaining_frames <= 0) {
947 		return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
/* copy the surviving tail into a fresh buffer */
949 	shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
950 	cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
951 	return make_pair(cut, time + discard_time);