2 Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_subtitles.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <boost/foreach.hpp>
/* Convenience macro: write a TYPE_GENERAL entry to the film's log; arguments as for String::compose(). */
62 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
/** Construct a Player for @p film playing @p playlist.
 *  Connects to change signals on the film and playlist, sets the video container
 *  size from the film's frame size, primes the audio processor state and performs
 *  an initial accurate seek to time zero.
 *  NOTE(review): this chunk is truncated — the first initialiser (presumably
 *  _film) and the brace lines are missing from view.
 */
80 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
82 , _playlist (playlist)
83 , _have_valid_pieces (false)
84 , _ignore_video (false)
85 , _ignore_subtitle (false)
86 , _always_burn_subtitles (false)
88 , _play_referenced (false)
89 , _audio_merger (_film->audio_frame_rate())
/* Re-build pieces / invalidate state when the film, playlist or content changes */
91 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
92 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
93 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
94 set_video_container_size (_film->frame_size ());
/* Pick up the film's audio processor (if any) via the normal change path */
96 film_changed (Film::AUDIO_PROCESSOR);
/* Start decoding from the beginning, accurately */
98 seek (DCPTime (), true);
/** Rebuild _pieces from the playlist: create a decoder for each content item,
 *  apply ignore/referenced flags, connect decoder signals to our handlers and
 *  record per-audio-stream state.  Resets the last video/audio emission times.
 */
102 Player::setup_pieces ()
106 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files are missing/invalid */
108 if (!i->paths_valid ()) {
112 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log());
113 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
116 /* Not something that we can decode; e.g. Atmos content */
120 if (decoder->video && _ignore_video) {
121 decoder->video->set_ignore ();
124 if (decoder->subtitle && _ignore_subtitle) {
125 decoder->subtitle->set_ignore ();
/* DCP content that is being referenced still needs decoding when we play referenced assets */
128 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
129 if (dcp && _play_referenced) {
130 dcp->set_decode_referenced ();
133 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
134 _pieces.push_back (piece);
/* Route decoder output into Player::video / Player::audio / the subtitle handlers,
   holding only a weak_ptr to the piece so the connection does not keep it alive */
136 if (decoder->video) {
137 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
140 if (decoder->audio) {
141 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
144 if (decoder->subtitle) {
145 decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
146 decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
147 decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
/* One StreamState per audio stream, starting at the owning content's position */
151 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
152 if (i->content->audio) {
153 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
154 _stream_states[j] = StreamState (i, i->content->position ());
159 _last_video_time = DCPTime ();
160 _last_audio_time = DCPTime ();
161 _have_valid_pieces = true;
/** Handler for the playlist's ContentChanged signal.
 *  @param w weak pointer to the content that changed.
 *  @param property which property changed.
 *  @param frequent true if this change is happening rapidly (e.g. during a drag).
 *  Properties in the first group invalidate the pieces (decoders must be rebuilt);
 *  the second group only alters presentation, so presumably just triggers a
 *  Changed notification — the tail of this function is missing from view.
 */
165 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
167 shared_ptr<Content> c = w.lock ();
/* Changes that affect decoding/timing: throw the pieces away */
173 property == ContentProperty::POSITION ||
174 property == ContentProperty::LENGTH ||
175 property == ContentProperty::TRIM_START ||
176 property == ContentProperty::TRIM_END ||
177 property == ContentProperty::PATH ||
178 property == VideoContentProperty::FRAME_TYPE ||
179 property == DCPContentProperty::NEEDS_ASSETS ||
180 property == DCPContentProperty::NEEDS_KDM ||
181 property == SubtitleContentProperty::COLOUR ||
182 property == SubtitleContentProperty::OUTLINE ||
183 property == SubtitleContentProperty::SHADOW ||
184 property == SubtitleContentProperty::EFFECT_COLOUR ||
185 property == FFmpegContentProperty::SUBTITLE_STREAM ||
186 property == VideoContentProperty::COLOUR_CONVERSION
189 _have_valid_pieces = false;
/* Presentation-only changes: pieces stay valid */
193 property == SubtitleContentProperty::LINE_SPACING ||
194 property == SubtitleContentProperty::OUTLINE_WIDTH ||
195 property == SubtitleContentProperty::Y_SCALE ||
196 property == SubtitleContentProperty::FADE_IN ||
197 property == SubtitleContentProperty::FADE_OUT ||
198 property == ContentProperty::VIDEO_FRAME_RATE ||
199 property == SubtitleContentProperty::USE ||
200 property == SubtitleContentProperty::X_OFFSET ||
201 property == SubtitleContentProperty::Y_OFFSET ||
202 property == SubtitleContentProperty::X_SCALE ||
203 property == SubtitleContentProperty::FONTS ||
204 property == VideoContentProperty::CROP ||
205 property == VideoContentProperty::SCALE ||
206 property == VideoContentProperty::FADE_IN ||
207 property == VideoContentProperty::FADE_OUT
/** Set the size of the "container" into which all video will be scaled/placed.
 *  No-op if the size is unchanged; otherwise recreates the cached black frame
 *  (RGB24) at the new size.
 */
215 Player::set_video_container_size (dcp::Size s)
217 if (s == _video_container_size) {
221 _video_container_size = s;
223 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
224 _black_image->make_black ();
/** Handler for the playlist's Changed signal: pieces must be rebuilt before the next pass. */
230 Player::playlist_changed ()
232 _have_valid_pieces = false;
/** Handler for the film's Changed signal.
 *  @param p the film property that changed.
 */
237 Player::film_changed (Film::Property p)
239 /* Here we should notice Film properties that affect our output, and
240 alert listeners that our output now would be different to how it was
241 last time we were run.
244 if (p == Film::CONTAINER) {
246 } else if (p == Film::VIDEO_FRAME_RATE) {
247 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
248 so we need new pieces here.
250 _have_valid_pieces = false;
252 } else if (p == Film::AUDIO_PROCESSOR) {
/* Clone the film's audio processor at the film's audio sample rate for our own use */
253 if (_film->audio_processor ()) {
254 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
/** Scale a list of image subtitles up to _video_container_size and convert each
 *  to a PositionImage placed at its rectangle's position within the container.
 *  @param subs image subtitles with fractional (0..1) rectangles.
 *  @return positioned images ready for merging onto a frame.
 */
260 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
262 list<PositionImage> all;
264 for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
269 /* We will scale the subtitle up to fit _video_container_size */
270 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
272 /* Then we need a corrective translation, consisting of two parts:
274 * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
275 * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
277 * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
278 * (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
279 * (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
281 * Combining these two translations gives these expressions.
288 dcp::YUV_TO_RGB_REC601,
289 i->image->pixel_format (),
294 lrint (_video_container_size.width * i->rectangle.x),
295 lrint (_video_container_size.height * i->rectangle.y)
/** @return a PlayerVideo wrapping the cached black image, filling the whole
 *  video container, using the first preset colour conversion.
 */
304 shared_ptr<PlayerVideo>
305 Player::black_player_video_frame () const
307 return shared_ptr<PlayerVideo> (
309 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
312 _video_container_size,
313 _video_container_size,
316 PresetColourConversion::all().front().conversion
/** Convert a DCP time to a frame index in a piece's video content,
 *  clamping to the piece's trimmed length and accounting for trim-start
 *  and any skip/repeat implied by the frame-rate change.
 */
322 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
324 DCPTime s = t - piece->content->position ();
325 s = min (piece->content->length_after_trim(), s);
326 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
328 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
329 then convert that ContentTime to frames at the content's rate. However this fails for
330 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
331 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
333 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
335 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/** Inverse of dcp_to_content_video: convert a content video frame index to a
 *  DCP time, clamped to be no earlier than DCPTime zero.
 */
339 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
341 /* See comment in dcp_to_content_video */
342 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
343 return max (DCPTime (), d + piece->content->position ());
/** Convert a DCP time to a frame count of resampled audio (at the film's audio
 *  frame rate) within a piece, clamping to the piece's trimmed length.
 */
347 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
349 DCPTime s = t - piece->content->position ();
350 s = min (piece->content->length_after_trim(), s);
351 /* See notes in dcp_to_content_video */
352 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/** Inverse of dcp_to_resampled_audio: convert a resampled-audio frame count to
 *  a DCP time within the film.
 */
356 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
358 /* See comment in dcp_to_content_video */
359 return DCPTime::from_frames (f, _film->audio_frame_rate())
360 - DCPTime (piece->content->trim_start(), piece->frc)
361 + piece->content->position();
/** Convert a DCP time to a ContentTime within a piece, clamping to the piece's
 *  trimmed length and adding back the trim-start offset.
 */
365 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
367 DCPTime s = t - piece->content->position ();
368 s = min (piece->content->length_after_trim(), s);
369 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/** Inverse of dcp_to_content_time: convert a ContentTime within a piece to a
 *  DCP time, clamped to be no earlier than DCPTime zero.
 */
373 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
375 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/** Collect the fonts used by all subtitle content in the playlist,
 *  rebuilding pieces first if necessary.
 */
378 list<shared_ptr<Font> >
379 Player::get_subtitle_fonts ()
381 if (!_have_valid_pieces) {
385 list<shared_ptr<Font> > fonts;
386 BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
387 if (p->content->subtitle) {
388 /* XXX: things may go wrong if there are duplicate font IDs
389 with different font files.
391 list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
392 copy (f.begin(), f.end(), back_inserter (fonts));
399 /** Set this player never to produce any video data */
401 Player::set_ignore_video ()
403 _ignore_video = true;
/** Set this player never to produce any subtitle data */
407 Player::set_ignore_subtitle ()
409 _ignore_subtitle = true;
412 /** Set whether or not this player should always burn text subtitles into the image,
413 * regardless of the content settings.
414 * @param burn true to always burn subtitles, false to obey content settings.
417 Player::set_always_burn_subtitles (bool burn)
419 _always_burn_subtitles = burn;
/* NOTE(review): the line below (original line 426) appears to belong to a
   following setter whose declaration is missing from this chunk — confirm
   against the full file before editing. */
426 _have_valid_pieces = false;
/** Set this player to play (i.e. decode) assets that would normally be referenced
 *  from an existing DCP rather than re-encoded; invalidates the pieces.
 */
430 Player::set_play_referenced ()
432 _play_referenced = true;
433 _have_valid_pieces = false;
/** Build the list of reel assets (picture/sound/subtitle) that are referenced
 *  from existing DCPs in the playlist, adjusting each asset's entry point and
 *  duration for the content's trims and computing its period on the DCP timeline.
 */
436 list<ReferencedReelAsset>
437 Player::get_reel_assets ()
439 list<ReferencedReelAsset> a;
441 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can be referenced */
442 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
447 scoped_ptr<DCPDecoder> decoder;
449 decoder.reset (new DCPDecoder (j, _film->log()));
455 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
/* Trims expressed as frame counts at the content's (DCP's) frame rate */
457 DCPOMATIC_ASSERT (j->video_frame_rate ());
458 double const cfr = j->video_frame_rate().get();
459 Frame const trim_start = j->trim_start().frames_round (cfr);
460 Frame const trim_end = j->trim_end().frames_round (cfr);
461 int const ffr = _film->video_frame_rate ();
/* Start of this reel on the DCP timeline; offset accumulates reel lengths */
463 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
464 if (j->reference_video ()) {
465 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
466 DCPOMATIC_ASSERT (ra);
467 ra->set_entry_point (ra->entry_point() + trim_start);
468 ra->set_duration (ra->duration() - trim_start - trim_end);
470 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
474 if (j->reference_audio ()) {
475 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
476 DCPOMATIC_ASSERT (ra);
477 ra->set_entry_point (ra->entry_point() + trim_start);
478 ra->set_duration (ra->duration() - trim_start - trim_end);
480 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
484 if (j->reference_subtitle ()) {
485 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
486 DCPOMATIC_ASSERT (ra);
487 ra->set_entry_point (ra->entry_point() + trim_start);
488 ra->set_duration (ra->duration() - trim_start - trim_end);
490 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
494 /* Assume that main picture duration is the length of the reel */
495 offset += k->main_picture()->duration ();
/* NOTE(review): the signature line for this function is missing from this
   chunk; from its body it is evidently Player::pass() — the main "emit the
   next bit of output" step.  It picks whichever of (content decoders,
   pending black video, pending silence) is farthest behind, makes it emit,
   then flushes any audio that is ready from the merger. */
505 if (!_have_valid_pieces) {
509 if (_playlist->length() == DCPTime()) {
510 /* Special case of an empty Film; just give one black frame */
511 emit_video (black_player_video_frame(), DCPTime());
515 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
517 shared_ptr<Piece> earliest_content;
518 optional<DCPTime> earliest_time;
520 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
522 DCPTime const t = content_time_to_dcp (i, i->decoder->position());
523 /* Given two choices at the same time, pick the one with a subtitle so we see it before
526 if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->subtitle)) {
528 earliest_content = i;
542 if (earliest_content) {
/* Black filler / silence filler may be even further behind than any content */
546 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
547 earliest_time = _black.position ();
551 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
552 earliest_time = _silent.position ();
/* Make the chosen source emit: either pass the decoder, or emit one frame of
   black / up to one frame's worth of silence */
558 earliest_content->done = earliest_content->decoder->pass ();
561 emit_video (black_player_video_frame(), _black.position());
562 _black.set_position (_black.position() + one_video_frame());
566 DCPTimePeriod period (_silent.period_at_position());
567 if (period.duration() > one_video_frame()) {
568 period.to = period.from + one_video_frame();
571 _silent.set_position (period.to);
579 /* Emit any audio that is ready */
/* Only pull audio up to the earliest point any live stream has pushed to,
   so streams cannot be emitted out of order */
581 DCPTime pull_to = _film->length ();
582 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
583 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
584 pull_to = i->second.last_push_end;
588 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
589 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
590 if (_last_audio_time && i->second < *_last_audio_time) {
591 /* There has been an accurate seek and we have received some audio before the seek time;
594 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
601 emit_audio (i->first, i->second);
/** Gather all subtitles (image and rendered text) that should be burnt onto the
 *  frame at @p time, merged into one PositionImage; empty optional if none.
 */
607 optional<PositionImage>
608 Player::subtitles_for_frame (DCPTime time) const
610 list<PositionImage> subtitles;
612 BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
614 /* Image subtitles */
615 list<PositionImage> c = transform_image_subtitles (i.image);
616 copy (c.begin(), c.end(), back_inserter (subtitles));
618 /* Text subtitles (rendered to an image) */
619 if (!i.text.empty ()) {
620 list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
621 copy (s.begin(), s.end(), back_inserter (subtitles));
625 if (subtitles.empty ()) {
626 return optional<PositionImage> ();
629 return merge (subtitles);
/** Handler for video data arriving from a piece's decoder.
 *  Drops frames outside the content's period or before the last accurate seek,
 *  fills any gap since the last emitted frame (repeating the piece's previous
 *  frame, or black), then builds and emits a PlayerVideo for this frame.
 */
633 Player::video (weak_ptr<Piece> wp, ContentVideo video)
635 shared_ptr<Piece> piece = wp.lock ();
/* If the frame-rate change skips frames, drop every other (odd) source frame */
640 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
641 if (frc.skip && (video.frame % 2) == 1) {
645 /* Time and period of the frame we will emit */
646 DCPTime const time = content_video_to_dcp (piece, video.frame);
647 DCPTimePeriod const period (time, time + one_video_frame());
649 /* Discard if it's outside the content's period or if it's before the last accurate seek */
651 time < piece->content->position() ||
652 time >= piece->content->end() ||
653 (_last_video_time && time < *_last_video_time)) {
657 /* Fill gaps that we discover now that we have some video which needs to be emitted */
659 if (_last_video_time) {
660 /* XXX: this may not work for 3D */
661 DCPTime fill_from = max (*_last_video_time, piece->content->position());
662 for (DCPTime j = fill_from; j < time; j += one_video_frame()) {
663 LastVideoMap::const_iterator k = _last_video.find (wp);
664 if (k != _last_video.end ()) {
665 emit_video (k->second, j);
667 emit_video (black_player_video_frame(), j);
/* Cache this frame per-piece so future gaps can repeat it */
672 _last_video[wp].reset (
675 piece->content->video->crop (),
676 piece->content->video->fade (video.frame),
677 piece->content->video->scale().size (
678 piece->content->video, _video_container_size, _film->frame_size ()
680 _video_container_size,
683 piece->content->video->colour_conversion ()
687 emit_video (_last_video[wp], time);
/** Handler for audio data arriving from a piece's decoder.
 *  Trims the block to the content's period, applies gain, remaps channels,
 *  runs any audio processor, then pushes the result into the audio merger and
 *  records how far this stream has been pushed.
 */
691 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
693 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
695 shared_ptr<Piece> piece = wp.lock ();
700 shared_ptr<AudioContent> content = piece->content->audio;
701 DCPOMATIC_ASSERT (content);
703 /* Compute time in the DCP */
704 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
705 /* And the end of this block in the DCP */
706 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
708 /* Remove anything that comes before the start or after the end of the content */
709 if (time < piece->content->position()) {
710 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
712 /* This audio is entirely discarded */
715 content_audio.audio = cut.first;
717 } else if (time > piece->content->end()) {
720 } else if (end > piece->content->end()) {
/* Block straddles the end of the content: keep only the part before the end */
721 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
722 if (remaining_frames == 0) {
725 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
726 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
727 content_audio.audio = cut;
730 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting (dB), if any */
734 if (content->gain() != 0) {
735 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
736 gain->apply_gain (content->gain ());
737 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout */
742 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
746 if (_audio_processor) {
747 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push into the merger and note how far this stream has now been pushed */
752 _audio_merger.push (content_audio.audio, time);
753 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
754 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/** Handler for the start of an image subtitle from a piece's decoder.
 *  Applies the content's offset/scale settings (with a corrective translation
 *  to keep the subtitle centred) and registers it as active from its DCP time.
 */
758 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
760 shared_ptr<Piece> piece = wp.lock ();
765 /* Apply content's subtitle offsets */
766 subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
767 subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
769 /* Apply content's subtitle scale */
770 subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
771 subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
773 /* Apply a corrective translation to keep the subtitle centred after that scale */
774 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
775 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
778 ps.image.push_back (subtitle.sub);
779 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
781 _active_subtitles.add_from (wp, ps, from);
/** Handler for the start of a text subtitle from a piece's decoder.
 *  Applies the content's position offsets and x/y scale (common scale folded
 *  into the font size, residual expressed as an aspect adjustment), stamps the
 *  in-time and registers the subtitle as active from its DCP time.
 */
785 Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
787 shared_ptr<Piece> piece = wp.lock ();
793 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
795 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
796 s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
797 s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
798 float const xs = piece->content->subtitle->x_scale();
799 float const ys = piece->content->subtitle->y_scale();
800 float size = s.size();
802 /* Adjust size to express the common part of the scaling;
803 e.g. if xs = ys = 0.5 we scale size by 2.
805 if (xs > 1e-5 && ys > 1e-5) {
806 size *= 1 / min (1 / xs, 1 / ys);
810 /* Then express aspect ratio changes */
811 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
812 s.set_aspect_adjust (xs / ys);
/* dcp::Time here is (seconds, editable-rate) — in-time at 1000 units/second */
815 s.set_in (dcp::Time(from.seconds(), 1000));
816 ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
817 ps.add_fonts (piece->content->subtitle->fonts ());
820 _active_subtitles.add_from (wp, ps, from);
/** Handler for the end of a subtitle from a piece's decoder.
 *  Closes the active subtitle at the corresponding DCP time and, if the
 *  subtitle is in use but not being burnt in, emits it via the Subtitle signal.
 */
824 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
826 if (!_active_subtitles.have (wp)) {
830 shared_ptr<Piece> piece = wp.lock ();
835 DCPTime const dcp_to = content_time_to_dcp (piece, to);
837 pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
839 if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
840 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
/** Seek the player to @p time.
 *  @param accurate true to get every frame from @p time exactly (sets the
 *  last-emitted times so earlier data is discarded); false allows decoders to
 *  start from a convenient nearby point.
 *  Flushes the audio processor and merger, clears active subtitles and the
 *  per-piece last-video cache, and seeks every piece's decoder.
 */
845 Player::seek (DCPTime time, bool accurate)
847 if (_audio_processor) {
848 _audio_processor->flush ();
851 _audio_merger.clear ();
852 _active_subtitles.clear ();
854 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
855 if (time < i->content->position()) {
856 /* Before; seek to 0 */
857 i->decoder->seek (ContentTime(), accurate);
859 } else if (i->content->position() <= time && time < i->content->end()) {
860 /* During; seek to position */
861 i->decoder->seek (dcp_to_content_time (i, time), accurate);
864 /* After; this piece is done */
/* Accurate seeks pin the next emission times to exactly `time`; inaccurate
   ones leave them unset so whatever arrives first is accepted */
870 _last_video_time = time;
871 _last_audio_time = time;
873 _last_video_time = optional<DCPTime>();
874 _last_audio_time = optional<DCPTime>();
877 _black.set_position (time);
878 _silent.set_position (time);
880 _last_video.clear ();
/** Attach any subtitles due at @p time to the frame and emit it.
 *  Advances _last_video_time and expires old subtitles only when this frame
 *  completes the eye pair (both-eyes or right-eye frames).
 */
884 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
886 optional<PositionImage> subtitles = subtitles_for_frame (time);
888 pv->set_subtitle (subtitles.get ());
893 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
894 _last_video_time = time + one_video_frame();
895 _active_subtitles.clear_before (time);
/** Emit a block of audio and advance _last_audio_time past it. */
900 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
903 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/** Emit silence covering @p period, in blocks of at most half a second. */
907 Player::fill_audio (DCPTimePeriod period)
909 if (period.from == period.to) {
913 DCPOMATIC_ASSERT (period.from < period.to);
915 DCPTime t = period.from;
916 while (t < period.to) {
917 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
918 Frame const samples = block.frames_round(_film->audio_frame_rate());
920 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
921 silence->make_silent ();
922 emit_audio (silence, t);
/** @return the duration of one video frame at the film's video frame rate. */
929 Player::one_video_frame () const
931 return DCPTime::from_frames (1, _film->video_frame_rate ());
/** Discard the part of @p audio (starting at @p time) that falls before
 *  @p discard_to.
 *  @return the remaining audio and its new start time; a null buffer if
 *  everything was discarded.
 */
934 pair<shared_ptr<AudioBuffers>, DCPTime>
935 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
937 DCPTime const discard_time = discard_to - time;
938 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
939 Frame remaining_frames = audio->frames() - discard_frames;
940 if (remaining_frames <= 0) {
941 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
943 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
944 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
945 return make_pair(cut, time + discard_time);