2 Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_subtitles.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <boost/foreach.hpp>
62 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
/* Construct a Player for a Film/Playlist pair.
   NOTE(review): this is a numbered fragment; the initialiser for `_film`
   (original line 81) and the braces are not visible here — confirm against
   the full file.
*/
80 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
82 , _playlist (playlist)
83 , _have_valid_pieces (false)
84 , _ignore_video (false)
85 , _ignore_audio (false)
86 , _always_burn_subtitles (false)
88 , _play_referenced (false)
89 , _audio_merger (_film->audio_frame_rate())
/* Watch for changes to the film, the playlist and the playlist's content so
   the player can invalidate or rebuild its state. */
91 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
92 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
93 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
94 set_video_container_size (_film->frame_size ());
/* Force initial creation of the audio processor (film_changed handles
   Film::AUDIO_PROCESSOR below). */
96 film_changed (Film::AUDIO_PROCESSOR);
/* Start with an accurate seek to the beginning. */
98 seek (DCPTime (), true);
/* (Re)build the list of Pieces (content + decoder + frame-rate change) from
   the playlist, wire decoder signals to this Player, and reset playback
   bookkeeping.  NOTE(review): numbered fragment — some lines (braces,
   `continue` statements around lines 109/117) are elided.
*/
102 Player::setup_pieces ()
106 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files are missing. */
108 if (!i->paths_valid ()) {
112 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log());
113 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
116 /* Not something that we can decode; e.g. Atmos content */
/* Honour the ignore-video / ignore-audio flags set on this player. */
120 if (decoder->video && _ignore_video) {
121 decoder->video->set_ignore ();
124 if (decoder->audio && _ignore_audio) {
125 decoder->audio->set_ignore ();
/* If we are playing referenced content the DCP decoder must decode it
   rather than skipping it. */
128 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
129 if (dcp && _play_referenced) {
130 dcp->set_decode_referenced ();
133 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
134 _pieces.push_back (piece);
/* Connect decoder output signals back into this player; a weak_ptr to
   the piece is bound so a destroyed piece does not keep itself alive. */
136 if (decoder->video) {
137 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
140 if (decoder->audio) {
141 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
144 if (decoder->subtitle) {
145 decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
146 decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
147 decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
/* Record, per audio stream, which piece it belongs to and where pushing
   of its audio starts. */
151 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
152 if (i->content->audio) {
153 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
154 _stream_states[j] = StreamState (i, i->content->position ());
/* Empty trackers for the periods with no video (black) / no audio (silence). */
159 _black = Empty (_playlist, bind(&Content::video, _1));
160 _silent = Empty (_playlist, bind(&Content::audio, _1));
162 _last_video_time = DCPTime ();
163 _last_audio_time = DCPTime ();
164 _have_valid_pieces = true;
/* Handle a change to a property of some content in the playlist.
   Properties in the first group invalidate the pieces (decoders must be
   rebuilt); the second group apparently only requires listeners to be told.
   NOTE(review): numbered fragment — the `if (` openers (original lines
   ~175/195) and the signal-emission lines (~193/212) are elided here.
*/
168 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
170 shared_ptr<Content> c = w.lock ();
/* Properties that change the structure of the playback (timing, trims,
   decoder configuration): rebuild pieces. */
176 property == ContentProperty::POSITION ||
177 property == ContentProperty::LENGTH ||
178 property == ContentProperty::TRIM_START ||
179 property == ContentProperty::TRIM_END ||
180 property == ContentProperty::PATH ||
181 property == VideoContentProperty::FRAME_TYPE ||
182 property == DCPContentProperty::NEEDS_ASSETS ||
183 property == DCPContentProperty::NEEDS_KDM ||
184 property == SubtitleContentProperty::COLOUR ||
185 property == SubtitleContentProperty::OUTLINE ||
186 property == SubtitleContentProperty::SHADOW ||
187 property == SubtitleContentProperty::EFFECT_COLOUR ||
188 property == FFmpegContentProperty::SUBTITLE_STREAM ||
189 property == VideoContentProperty::COLOUR_CONVERSION
192 _have_valid_pieces = false;
/* Presentation-only properties: pieces stay valid. */
196 property == SubtitleContentProperty::LINE_SPACING ||
197 property == SubtitleContentProperty::OUTLINE_WIDTH ||
198 property == SubtitleContentProperty::Y_SCALE ||
199 property == SubtitleContentProperty::FADE_IN ||
200 property == SubtitleContentProperty::FADE_OUT ||
201 property == ContentProperty::VIDEO_FRAME_RATE ||
202 property == SubtitleContentProperty::USE ||
203 property == SubtitleContentProperty::X_OFFSET ||
204 property == SubtitleContentProperty::Y_OFFSET ||
205 property == SubtitleContentProperty::X_SCALE ||
206 property == SubtitleContentProperty::FONTS ||
207 property == VideoContentProperty::CROP ||
208 property == VideoContentProperty::SCALE ||
209 property == VideoContentProperty::FADE_IN ||
210 property == VideoContentProperty::FADE_OUT
/* Set the size of the "container" into which output video frames are placed,
   rebuilding the cached all-black frame at the new size.  No-op when the
   size is unchanged.
*/
218 Player::set_video_container_size (dcp::Size s)
220 if (s == _video_container_size) {
224 _video_container_size = s;
/* Cache a black RGB24 frame of the container size for gap filling. */
226 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
227 _black_image->make_black ();
/* The playlist has changed structurally: invalidate pieces so they are
   rebuilt on the next pass/seek. */
233 Player::playlist_changed ()
235 _have_valid_pieces = false;
/* React to a change in a Film property that affects this player's output.
   NOTE(review): numbered fragment — the bodies of some branches (e.g. what
   happens on Film::CONTAINER at line 248) are elided here.
*/
240 Player::film_changed (Film::Property p)
242 /* Here we should notice Film properties that affect our output, and
243 alert listeners that our output now would be different to how it was
244 last time we were run.
247 if (p == Film::CONTAINER) {
249 } else if (p == Film::VIDEO_FRAME_RATE) {
250 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
251 so we need new pieces here.
253 _have_valid_pieces = false;
255 } else if (p == Film::AUDIO_PROCESSOR) {
256 if (_film->audio_processor ()) {
/* Clone the film's audio processor at the film's audio sample rate. */
257 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
/* Scale and position a list of image subtitles so they fit the video
   container, returning PositionImages ready for compositing.
   NOTE(review): numbered fragment — the actual scale/convert call and the
   surrounding expression (original lines ~286-305) are largely elided;
   only the position computation is fully visible.
*/
263 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
265 list<PositionImage> all;
267 for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
272 /* We will scale the subtitle up to fit _video_container_size */
/* rectangle is in proportional (0..1) units, so multiply by the container. */
273 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
275 /* Then we need a corrective translation, consisting of two parts:
277 * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
278 * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
280 * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
281 * (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
282 * (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
284 * Combining these two translations gives these expressions.
291 dcp::YUV_TO_RGB_REC601,
292 i->image->pixel_format (),
/* lrint: round proportional position to the nearest integer pixel. */
297 lrint (_video_container_size.width * i->rectangle.x),
298 lrint (_video_container_size.height * i->rectangle.y)
/* Build a PlayerVideo wrapping the cached black image, used to fill gaps
   where no content video exists.  NOTE(review): numbered fragment — the
   PlayerVideo constructor's other arguments (crop, eyes, part, ...) are
   elided here.
*/
307 shared_ptr<PlayerVideo>
308 Player::black_player_video_frame () const
310 return shared_ptr<PlayerVideo> (
312 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
315 _video_container_size,
316 _video_container_size,
/* Any colour conversion will do for an all-black frame. */
319 PresetColourConversion::all().front().conversion
/* Convert a DCP timeline time to a video frame index within a piece's
   content, clamping to the piece's trimmed extent.
*/
325 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
327 DCPTime s = t - piece->content->position ();
328 s = min (piece->content->length_after_trim(), s);
329 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
331 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
332 then convert that ContentTime to frames at the content's rate. However this fails for
333 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
334 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
336 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
/* frames_floor at the DCP rate, then divide by the skip/repeat factor. */
338 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: convert a content video frame index to a
   DCP timeline time, never before the start of the DCP.
*/
342 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
344 /* See comment in dcp_to_content_video */
345 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
346 return max (DCPTime (), d + piece->content->position ());
/* Convert a DCP timeline time to an audio frame count at the film's audio
   sample rate, within a piece's trimmed extent.
*/
350 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
352 DCPTime s = t - piece->content->position ();
353 s = min (piece->content->length_after_trim(), s);
354 /* See notes in dcp_to_content_video */
355 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: convert a resampled audio frame count
   back to a DCP timeline time.
*/
359 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
361 /* See comment in dcp_to_content_video */
362 return DCPTime::from_frames (f, _film->audio_frame_rate())
363 - DCPTime (piece->content->trim_start(), piece->frc)
364 + piece->content->position();
/* Convert a DCP timeline time to a ContentTime within a piece, accounting
   for position, trim and frame-rate change; clamped to be non-negative.
*/
368 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
370 DCPTime s = t - piece->content->position ();
371 s = min (piece->content->length_after_trim(), s);
372 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Inverse of dcp_to_content_time: convert a ContentTime within a piece to a
   DCP timeline time, clamped to be non-negative.
*/
376 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
378 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by all subtitle content in the playlist,
   rebuilding pieces first if they are stale.  NOTE(review): numbered
   fragment — the call that rebuilds pieces (presumably setup_pieces() at
   ~line 385) and the return statement are elided here.
*/
381 list<shared_ptr<Font> >
382 Player::get_subtitle_fonts ()
384 if (!_have_valid_pieces) {
388 list<shared_ptr<Font> > fonts;
389 BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
390 if (p->content->subtitle) {
391 /* XXX: things may go wrong if there are duplicate font IDs
392 with different font files.
394 list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
395 copy (f.begin(), f.end(), back_inserter (fonts));
402 /** Set this player never to produce any video data */
404 Player::set_ignore_video ()
406 _ignore_video = true;
409 /** Set whether or not this player should always burn text subtitles into the image,
410 * regardless of the content settings.
411 * @param burn true to always burn subtitles, false to obey content settings.
414 Player::set_always_burn_subtitles (bool burn)
416 _always_burn_subtitles = burn;
/* NOTE(review): line 423 below belongs to a different setter whose header
   (original lines ~418-422) is elided in this fragment — it invalidates the
   pieces; confirm which setter against the full file. */
423 _have_valid_pieces = false;
/* Make this player play content that is referenced from DCPs (rather than
   skipping it); pieces must be rebuilt so decoders pick this up. */
427 Player::set_play_referenced ()
429 _play_referenced = true;
430 _have_valid_pieces = false;
/* Gather the reel assets (picture/sound/subtitle) of DCP content that is
   marked as "referenced", adjusting each asset's entry point and duration
   for the content's trims and computing its period on the DCP timeline.
   NOTE(review): numbered fragment — the try/catch around the DCPDecoder
   construction (~lines 445-450), the a.push_back calls (~466/476/486) and
   the offset initialisation (~line 451) are elided here.
*/
433 list<ReferencedReelAsset>
434 Player::get_reel_assets ()
436 list<ReferencedReelAsset> a;
438 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can be referenced. */
439 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
444 scoped_ptr<DCPDecoder> decoder;
446 decoder.reset (new DCPDecoder (j, _film->log()));
452 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
/* Trims are expressed in content frames at the content's rate (cfr);
   periods on the timeline use the film's rate (ffr). */
454 DCPOMATIC_ASSERT (j->video_frame_rate ());
455 double const cfr = j->video_frame_rate().get();
456 Frame const trim_start = j->trim_start().frames_round (cfr);
457 Frame const trim_end = j->trim_end().frames_round (cfr);
458 int const ffr = _film->video_frame_rate ();
460 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
461 if (j->reference_video ()) {
462 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
463 DCPOMATIC_ASSERT (ra);
/* Push the entry point forward and shorten the duration to honour
   the content's trims. */
464 ra->set_entry_point (ra->entry_point() + trim_start);
465 ra->set_duration (ra->duration() - trim_start - trim_end);
467 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
471 if (j->reference_audio ()) {
472 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
473 DCPOMATIC_ASSERT (ra);
474 ra->set_entry_point (ra->entry_point() + trim_start);
475 ra->set_duration (ra->duration() - trim_start - trim_end);
477 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
481 if (j->reference_subtitle ()) {
482 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
483 DCPOMATIC_ASSERT (ra);
484 ra->set_entry_point (ra->entry_point() + trim_start);
485 ra->set_duration (ra->duration() - trim_start - trim_end);
487 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
491 /* Assume that main picture duration is the length of the reel */
492 offset += k->main_picture()->duration ()
/* One step of the playback pump (the enclosing function header, presumably
   `bool Player::pass()`, is elided in this fragment).  Finds the source
   that is furthest behind — a decoder, pending black or pending silence —
   emits from it, then pulls any audio that is fully merged and ready.
*/
502 if (!_have_valid_pieces) {
506 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
508 shared_ptr<Piece> earliest;
509 DCPTime earliest_content;
511 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
513 DCPTime const t = content_time_to_dcp (i, i->decoder->position());
514 /* Given two choices at the same time, pick the one with a subtitle so we see it before
517 if (!earliest || t < earliest_content || (t == earliest_content && i->decoder->subtitle)) {
518 earliest_content = t;
/* Decide what to emit: black frame, silence, or a decoder pass. */
526 if (!_black.done() && (!earliest || _black.position() < earliest_content)) {
527 /* There is some black that must be emitted */
528 emit_video (black_player_video_frame(), _black.position());
529 _black.set_position (_black.position() + one_video_frame());
530 } else if (!_silent.done() && (!earliest || _silent.position() < earliest_content)) {
531 /* There is some silence that must be emitted */
532 DCPTimePeriod period (_silent.period_at_position());
/* Emit at most one video frame's worth of silence per pass. */
533 if (period.duration() > one_video_frame()) {
534 period.to = period.from + one_video_frame();
537 _silent.set_position (period.to);
538 } else if (_playlist->length() == DCPTime()) {
539 /* Special case of an empty Film; just give one black frame */
540 emit_video (black_player_video_frame(), DCPTime());
541 } else if (earliest) {
542 earliest->done = earliest->decoder->pass ();
547 /* Emit any audio that is ready */
/* pull_to is the earliest time up to which every live stream has pushed,
   so audio before it is complete and can be merged out. */
549 DCPTime pull_to = _playlist->length ();
550 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
551 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
552 pull_to = i->second.last_push_end;
556 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
557 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
558 if (_last_audio_time && i->second < *_last_audio_time) {
559 /* There has been an accurate seek and we have received some audio before the seek time;
/* Discard the part of the block that precedes the seek point. */
562 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
569 emit_audio (i->first, i->second);
/* Compose all subtitles (image and rendered text) that should be burnt into
   the frame at `time`, merged into a single PositionImage; empty optional
   when there are none.
*/
575 optional<PositionImage>
576 Player::subtitles_for_frame (DCPTime time) const
578 list<PositionImage> subtitles;
/* get_burnt honours per-content burn settings unless _always_burn_subtitles
   forces burning of everything. */
580 BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
582 /* Image subtitles */
583 list<PositionImage> c = transform_image_subtitles (i.image);
584 copy (c.begin(), c.end(), back_inserter (subtitles));
586 /* Text subtitles (rendered to an image) */
587 if (!i.text.empty ()) {
588 list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
589 copy (s.begin(), s.end(), back_inserter (subtitles));
593 if (subtitles.empty ()) {
594 return optional<PositionImage> ();
597 return merge (subtitles);
/* Receive one decoded video frame from a piece's decoder: fill any gap
   since the last emitted frame, discard out-of-range frames, then wrap the
   frame in a PlayerVideo and emit it.  NOTE(review): numbered fragment —
   several lines (early returns, parts of the PlayerVideo constructor call
   around lines 641-653) are elided here.
*/
601 Player::video (weak_ptr<Piece> wp, ContentVideo video)
603 shared_ptr<Piece> piece = wp.lock ();
/* When the DCP rate requires skipping, drop every second content frame. */
608 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
609 if (frc.skip && (video.frame % 2) == 1) {
613 /* Time and period of the frame we will emit */
614 DCPTime const time = content_video_to_dcp (piece, video.frame);
615 DCPTimePeriod const period (time, time + one_video_frame());
617 /* Fill gaps that we discover now that we have some video which needs to be emitted */
619 if (_last_video_time) {
620 /* XXX: this may not work for 3D */
621 DCPTime fill_from = max (*_last_video_time, piece->content->position());
622 for (DCPTime j = fill_from; j < time; j += one_video_frame()) {
/* Repeat this piece's last frame if we have one, otherwise black. */
623 LastVideoMap::const_iterator k = _last_video.find (wp);
624 if (k != _last_video.end ()) {
625 emit_video (k->second, j);
627 emit_video (black_player_video_frame(), j);
632 /* Discard if it's outside the content's period or if it's before the last accurate seek */
634 time < piece->content->position() ||
635 time >= piece->content->end() ||
636 (_last_video_time && time < *_last_video_time)) {
/* Cache the PlayerVideo per piece so gaps can be filled by repetition. */
640 _last_video[wp].reset (
643 piece->content->video->crop (),
644 piece->content->video->fade (video.frame),
645 piece->content->video->scale().size (
646 piece->content->video, _video_container_size, _film->frame_size ()
648 _video_container_size,
651 piece->content->video->colour_conversion ()
655 emit_video (_last_video[wp], time);
/* Receive a block of decoded audio from a piece's decoder: place it on the
   DCP timeline, trim anything outside the content's period, apply gain,
   remap to film channels, run the audio processor, then push into the
   merger.  NOTE(review): numbered fragment — early returns and some braces
   (e.g. after line 680) are elided here.
*/
659 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
661 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
663 shared_ptr<Piece> piece = wp.lock ();
668 shared_ptr<AudioContent> content = piece->content->audio;
669 DCPOMATIC_ASSERT (content);
671 /* Compute time in the DCP */
672 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
673 /* And the end of this block in the DCP */
674 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
676 /* Remove anything that comes before the start or after the end of the content */
677 if (time < piece->content->position()) {
678 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
680 /* This audio is entirely discarded */
683 content_audio.audio = cut.first;
/* Whole block starts after the content's end: drop it. */
685 } else if (time > piece->content->end()) {
/* Block straddles the content's end: keep only the leading part. */
688 } else if (end > piece->content->end()) {
689 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
690 if (remaining_frames == 0) {
693 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
694 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
695 content_audio.audio = cut;
698 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting (in dB, per AudioBuffers::apply_gain
   convention — confirm against audio_buffers.h). */
702 if (content->gain() != 0) {
703 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
704 gain->apply_gain (content->gain ());
705 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout. */
710 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
714 if (_audio_processor) {
715 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Record where this stream's pushed audio now ends, for pull_to in pass(). */
720 _audio_merger.push (content_audio.audio, time);
721 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
722 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Receive the start of an image subtitle: apply the content's offset and
   scale settings, then register it as active from its DCP start time.
   NOTE(review): numbered fragment — the declaration of `ps`
   (PlayerSubtitles, ~line 745) is elided here.
*/
726 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
728 shared_ptr<Piece> piece = wp.lock ();
733 /* Apply content's subtitle offsets */
734 subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
735 subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
737 /* Apply content's subtitle scale */
738 subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
739 subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
741 /* Apply a corrective translation to keep the subtitle centred after that scale */
742 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
743 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
746 ps.image.push_back (subtitle.sub);
747 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
749 _active_subtitles.add_from (wp, ps, from);
/* Receive the start of a text subtitle: apply per-content offset/scale to
   each SubtitleString, collect fonts, and register it as active from its
   DCP start time.  NOTE(review): numbered fragment — the declaration of
   `ps` (~line 762) is elided here.
*/
753 Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
755 shared_ptr<Piece> piece = wp.lock ();
761 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
763 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
764 s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
765 s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
766 float const xs = piece->content->subtitle->x_scale();
767 float const ys = piece->content->subtitle->y_scale();
768 float size = s.size();
770 /* Adjust size to express the common part of the scaling;
771 e.g. if xs = ys = 0.5 we scale size by 2.
773 if (xs > 1e-5 && ys > 1e-5) {
774 size *= 1 / min (1 / xs, 1 / ys);
778 /* Then express aspect ratio changes */
779 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
780 s.set_aspect_adjust (xs / ys);
/* dcp::Time takes seconds plus an editing rate (1000 = milliseconds). */
783 s.set_in (dcp::Time(from.seconds(), 1000));
784 ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
785 ps.add_fonts (piece->content->subtitle->fonts ());
788 _active_subtitles.add_from (wp, ps, from);
/* Receive the end of a subtitle: close off the active subtitle for this
   piece and, if it is not being burnt in, emit it via the Subtitle signal.
*/
792 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
794 if (!_active_subtitles.have (wp)) {
798 shared_ptr<Piece> piece = wp.lock ();
803 DCPTime const dcp_to = content_time_to_dcp (piece, to);
805 pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
/* Only emit as a separate subtitle when it is in use and not burnt in
   (neither globally nor by the content's own setting). */
807 if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
808 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
/* Seek all decoders to `time`.  With accurate=true, output exactly from
   `time` (last video/audio times are set so earlier data is discarded);
   otherwise the last times are cleared and output may start earlier.
   NOTE(review): numbered fragment — the branch structure around lines
   836-843 (accurate vs not) is partly elided here.
*/
813 Player::seek (DCPTime time, bool accurate)
815 if (_audio_processor) {
816 _audio_processor->flush ();
/* Throw away anything pending from before the seek. */
819 _audio_merger.clear ();
820 _active_subtitles.clear ();
822 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
823 if (time < i->content->position()) {
824 /* Before; seek to 0 */
825 i->decoder->seek (ContentTime(), accurate);
827 } else if (i->content->position() <= time && time < i->content->end()) {
828 /* During; seek to position */
829 i->decoder->seek (dcp_to_content_time (i, time), accurate);
832 /* After; this piece is done */
/* Accurate seek: pin output to start exactly at `time`. */
838 _last_video_time = time;
839 _last_audio_time = time;
/* Inaccurate seek: no constraint on where output resumes. */
841 _last_video_time = optional<DCPTime>();
842 _last_audio_time = optional<DCPTime>();
845 _black.set_position (time);
846 _silent.set_position (time);
/* Forget cached last-video frames; they are no longer adjacent. */
848 _last_video.clear ();
/* Emit one video frame: attach any subtitles to be burnt in, then (for
   2D or the right eye of 3D) advance the video clock and drop expired
   subtitles.  NOTE(review): numbered fragment — the `if (subtitles)` guard
   (~line 855) and the Video signal emission (~line 858-859) are elided.
*/
852 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
854 optional<PositionImage> subtitles = subtitles_for_frame (time);
856 pv->set_subtitle (subtitles.get ());
/* Advance only after both eyes of a 3D frame have gone out. */
861 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
862 _last_video_time = time + one_video_frame();
863 _active_subtitles.clear_before (time);
/* Emit a block of audio and advance the audio clock past it.
   NOTE(review): the Audio signal emission (~line 870) is elided in this
   fragment. */
868 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
871 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence to cover `period`, in blocks of at most 0.5 seconds so no
   single allocation is too large.  A zero-length period is a no-op.
*/
875 Player::fill_audio (DCPTimePeriod period)
877 if (period.from == period.to) {
881 DCPOMATIC_ASSERT (period.from < period.to);
883 DCPTime t = period.from;
884 while (t < period.to) {
885 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
886 Frame const samples = block.frames_round(_film->audio_frame_rate());
888 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
889 silence->make_silent ();
890 emit_audio (silence, t);
/* The duration of a single video frame at the film's video frame rate. */
897 Player::one_video_frame () const
899 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Discard the part of `audio` (which starts at `time`) that falls before
   `discard_to`.  Returns the remaining audio and its new start time; a
   null buffer when everything was discarded.
*/
902 pair<shared_ptr<AudioBuffers>, DCPTime>
903 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
905 DCPTime const discard_time = discard_to - time;
906 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
907 Frame remaining_frames = audio->frames() - discard_frames;
908 if (remaining_frames <= 0) {
909 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
/* Copy the tail (remaining_frames starting at discard_frames) into a new buffer. */
911 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
912 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
913 return make_pair(cut, time + discard_time);