/*
    Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/
#include "audio_buffers.h"
#include "audio_content.h"
#include "audio_decoder.h"
#include "audio_processor.h"
#include "compose.hpp"
#include "content_audio.h"
#include "content_subtitle.h"
#include "content_video.h"
#include "dcp_content.h"
#include "dcp_decoder.h"
#include "decoder_factory.h"
#include "ffmpeg_content.h"
#include "frame_rate_change.h"
#include "image_decoder.h"
#include "player_video.h"
#include "raw_image_proxy.h"
#include "referenced_reel_asset.h"
#include "render_subtitles.h"
#include "subtitle_content.h"
#include "subtitle_decoder.h"
#include "video_decoder.h"
#include <dcp/reel_picture_asset.h>
#include <dcp/reel_sound_asset.h>
#include <dcp/reel_subtitle_asset.h>
#include <boost/foreach.hpp>
#include <algorithm>
#include <cmath>
#include <cstdint>
62 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
80 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
82 , _playlist (playlist)
83 , _have_valid_pieces (false)
84 , _ignore_video (false)
85 , _ignore_audio (false)
86 , _always_burn_subtitles (false)
88 , _play_referenced (false)
89 , _audio_merger (_film->audio_frame_rate())
91 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
92 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
93 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
94 set_video_container_size (_film->frame_size ());
96 film_changed (Film::AUDIO_PROCESSOR);
98 seek (DCPTime (), true);
102 Player::setup_pieces ()
106 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
108 if (!i->paths_valid ()) {
112 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log());
113 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
116 /* Not something that we can decode; e.g. Atmos content */
120 if (decoder->video && _ignore_video) {
121 decoder->video->set_ignore ();
124 if (decoder->audio && _ignore_audio) {
125 decoder->audio->set_ignore ();
128 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
129 if (dcp && _play_referenced) {
130 dcp->set_decode_referenced ();
133 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
134 _pieces.push_back (piece);
136 if (decoder->video) {
137 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
140 if (decoder->audio) {
141 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
144 if (decoder->subtitle) {
145 decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
146 decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
147 decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
151 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
152 if (i->content->audio) {
153 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
154 _stream_states[j] = StreamState (i, i->content->position ());
159 _last_video_time = DCPTime ();
160 _last_audio_time = DCPTime ();
161 _have_valid_pieces = true;
165 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
167 shared_ptr<Content> c = w.lock ();
173 property == ContentProperty::POSITION ||
174 property == ContentProperty::LENGTH ||
175 property == ContentProperty::TRIM_START ||
176 property == ContentProperty::TRIM_END ||
177 property == ContentProperty::PATH ||
178 property == VideoContentProperty::FRAME_TYPE ||
179 property == DCPContentProperty::NEEDS_ASSETS ||
180 property == DCPContentProperty::NEEDS_KDM ||
181 property == SubtitleContentProperty::COLOUR ||
182 property == SubtitleContentProperty::OUTLINE ||
183 property == SubtitleContentProperty::SHADOW ||
184 property == SubtitleContentProperty::EFFECT_COLOUR ||
185 property == FFmpegContentProperty::SUBTITLE_STREAM ||
186 property == VideoContentProperty::COLOUR_CONVERSION
189 _have_valid_pieces = false;
193 property == SubtitleContentProperty::LINE_SPACING ||
194 property == SubtitleContentProperty::OUTLINE_WIDTH ||
195 property == SubtitleContentProperty::Y_SCALE ||
196 property == SubtitleContentProperty::FADE_IN ||
197 property == SubtitleContentProperty::FADE_OUT ||
198 property == ContentProperty::VIDEO_FRAME_RATE ||
199 property == SubtitleContentProperty::USE ||
200 property == SubtitleContentProperty::X_OFFSET ||
201 property == SubtitleContentProperty::Y_OFFSET ||
202 property == SubtitleContentProperty::X_SCALE ||
203 property == SubtitleContentProperty::FONTS ||
204 property == VideoContentProperty::CROP ||
205 property == VideoContentProperty::SCALE ||
206 property == VideoContentProperty::FADE_IN ||
207 property == VideoContentProperty::FADE_OUT
215 Player::set_video_container_size (dcp::Size s)
217 if (s == _video_container_size) {
221 _video_container_size = s;
223 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
224 _black_image->make_black ();
230 Player::playlist_changed ()
232 _have_valid_pieces = false;
237 Player::film_changed (Film::Property p)
239 /* Here we should notice Film properties that affect our output, and
240 alert listeners that our output now would be different to how it was
241 last time we were run.
244 if (p == Film::CONTAINER) {
246 } else if (p == Film::VIDEO_FRAME_RATE) {
247 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
248 so we need new pieces here.
250 _have_valid_pieces = false;
252 } else if (p == Film::AUDIO_PROCESSOR) {
253 if (_film->audio_processor ()) {
254 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
260 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
262 list<PositionImage> all;
264 for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
269 /* We will scale the subtitle up to fit _video_container_size */
270 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
272 /* Then we need a corrective translation, consisting of two parts:
274 * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
275 * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
277 * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
278 * (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
279 * (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
281 * Combining these two translations gives these expressions.
288 dcp::YUV_TO_RGB_REC601,
289 i->image->pixel_format (),
294 lrint (_video_container_size.width * i->rectangle.x),
295 lrint (_video_container_size.height * i->rectangle.y)
304 shared_ptr<PlayerVideo>
305 Player::black_player_video_frame () const
307 return shared_ptr<PlayerVideo> (
309 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
312 _video_container_size,
313 _video_container_size,
316 PresetColourConversion::all().front().conversion
322 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
324 DCPTime s = t - piece->content->position ();
325 s = min (piece->content->length_after_trim(), s);
326 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
328 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
329 then convert that ContentTime to frames at the content's rate. However this fails for
330 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
331 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
333 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
335 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
339 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
341 /* See comment in dcp_to_content_video */
342 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
343 return max (DCPTime (), d + piece->content->position ());
347 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
349 DCPTime s = t - piece->content->position ();
350 s = min (piece->content->length_after_trim(), s);
351 /* See notes in dcp_to_content_video */
352 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
356 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
358 /* See comment in dcp_to_content_video */
359 DCPTime const d = DCPTime::from_frames (f, _film->audio_frame_rate()) - DCPTime (piece->content->trim_start(), piece->frc);
360 return max (DCPTime (), d + piece->content->position ());
364 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
366 DCPTime s = t - piece->content->position ();
367 s = min (piece->content->length_after_trim(), s);
368 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
372 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
374 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
377 list<shared_ptr<Font> >
378 Player::get_subtitle_fonts ()
380 if (!_have_valid_pieces) {
384 list<shared_ptr<Font> > fonts;
385 BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
386 if (p->content->subtitle) {
387 /* XXX: things may go wrong if there are duplicate font IDs
388 with different font files.
390 list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
391 copy (f.begin(), f.end(), back_inserter (fonts));
398 /** Set this player never to produce any video data */
400 Player::set_ignore_video ()
402 _ignore_video = true;
405 /** Set whether or not this player should always burn text subtitles into the image,
406 * regardless of the content settings.
407 * @param burn true to always burn subtitles, false to obey content settings.
410 Player::set_always_burn_subtitles (bool burn)
412 _always_burn_subtitles = burn;
/* NOTE(review): this statement appears to be the surviving tail of another small
   setter whose header was lost in this paste (presumably one that also needs the
   pieces rebuilding, e.g. a "fast mode" setter) — TODO confirm against the
   original file before relying on it. */
419 _have_valid_pieces = false;
423 Player::set_play_referenced ()
425 _play_referenced = true;
426 _have_valid_pieces = false;
429 list<ReferencedReelAsset>
430 Player::get_reel_assets ()
432 list<ReferencedReelAsset> a;
434 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
435 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
440 scoped_ptr<DCPDecoder> decoder;
442 decoder.reset (new DCPDecoder (j, _film->log()));
448 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
450 DCPOMATIC_ASSERT (j->video_frame_rate ());
451 double const cfr = j->video_frame_rate().get();
452 Frame const trim_start = j->trim_start().frames_round (cfr);
453 Frame const trim_end = j->trim_end().frames_round (cfr);
454 int const ffr = _film->video_frame_rate ();
456 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
457 if (j->reference_video ()) {
458 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
459 DCPOMATIC_ASSERT (ra);
460 ra->set_entry_point (ra->entry_point() + trim_start);
461 ra->set_duration (ra->duration() - trim_start - trim_end);
463 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
467 if (j->reference_audio ()) {
468 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
469 DCPOMATIC_ASSERT (ra);
470 ra->set_entry_point (ra->entry_point() + trim_start);
471 ra->set_duration (ra->duration() - trim_start - trim_end);
473 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
477 if (j->reference_subtitle ()) {
478 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
479 DCPOMATIC_ASSERT (ra);
480 ra->set_entry_point (ra->entry_point() + trim_start);
481 ra->set_duration (ra->duration() - trim_start - trim_end);
483 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
487 /* Assume that main picture duration is the length of the reel */
488 offset += k->main_picture()->duration ();
498 if (!_have_valid_pieces) {
504 if (_last_video_time && !_playlist->video_content_at(*_last_video_time) && *_last_video_time < _playlist->length()) {
505 /* _last_video_time is the time just after the last video we emitted, and there is no video content
506 at this time so we need to emit some black.
508 emit_video (black_player_video_frame(), *_last_video_time);
510 } else if (_playlist->length() == DCPTime()) {
511 /* Special case of an empty Film; just give one black frame */
512 emit_video (black_player_video_frame(), DCPTime());
516 if (_last_audio_time && !_playlist->audio_content_at(*_last_audio_time) && *_last_audio_time < _playlist->length()) {
517 /* _last_audio_time is the time just after the last audio we emitted. There is no audio here
518 so we need to emit some silence.
520 shared_ptr<Content> next = _playlist->next_audio_content(*_last_audio_time);
521 DCPTimePeriod period (*_last_audio_time, next ? next->position() : _playlist->length());
522 if (period.duration() > one_video_frame()) {
523 period = DCPTimePeriod (*_last_audio_time, *_last_audio_time + one_video_frame());
529 /* Now pass() the decoder which is farthest behind where we are */
531 shared_ptr<Piece> earliest;
532 DCPTime earliest_content;
534 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
536 DCPTime const t = content_time_to_dcp (i, i->decoder->position());
537 if (!earliest || t < earliest_content) {
538 earliest_content = t;
544 if (!filled && earliest) {
545 earliest->done = earliest->decoder->pass ();
548 /* Emit any audio that is ready */
550 DCPTime pull_to = _playlist->length ();
551 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
552 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
553 pull_to = i->second.last_push_end;
557 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
558 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
559 if (_last_audio_time && i->second < *_last_audio_time) {
560 /* There has been an accurate seek and we have received some audio before the seek time;
563 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
570 if (_last_audio_time) {
571 /* Fill in the gap before delayed audio; this doesn't need to take into account
572 periods with no audio as it should only occur in delayed audio case.
574 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
577 emit_audio (i->first, i->second);
580 return !earliest && !filled;
583 optional<PositionImage>
584 Player::subtitles_for_frame (DCPTime time) const
586 list<PositionImage> subtitles;
588 BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
590 /* Image subtitles */
591 list<PositionImage> c = transform_image_subtitles (i.image);
592 copy (c.begin(), c.end(), back_inserter (subtitles));
594 /* Text subtitles (rendered to an image) */
595 if (!i.text.empty ()) {
596 list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
597 copy (s.begin(), s.end(), back_inserter (subtitles));
601 if (subtitles.empty ()) {
602 return optional<PositionImage> ();
605 return merge (subtitles);
609 Player::video (weak_ptr<Piece> wp, ContentVideo video)
611 shared_ptr<Piece> piece = wp.lock ();
616 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
617 if (frc.skip && (video.frame % 2) == 1) {
621 /* Time and period of the frame we will emit */
622 DCPTime const time = content_video_to_dcp (piece, video.frame);
623 DCPTimePeriod const period (time, time + one_video_frame());
625 /* Fill gaps that we discover now that we have some video which needs to be emitted */
627 if (_last_video_time) {
628 /* XXX: this may not work for 3D */
629 DCPTime fill_from = max (*_last_video_time, piece->content->position());
630 for (DCPTime j = fill_from; j < time; j += one_video_frame()) {
631 LastVideoMap::const_iterator k = _last_video.find (wp);
632 if (k != _last_video.end ()) {
633 emit_video (k->second, j);
635 emit_video (black_player_video_frame(), j);
640 /* Discard if it's outside the content's period or if it's before the last accurate seek */
642 time < piece->content->position() ||
643 time >= piece->content->end() ||
644 (_last_video_time && time < *_last_video_time)) {
648 _last_video[wp].reset (
651 piece->content->video->crop (),
652 piece->content->video->fade (video.frame),
653 piece->content->video->scale().size (
654 piece->content->video, _video_container_size, _film->frame_size ()
656 _video_container_size,
659 piece->content->video->colour_conversion ()
663 emit_video (_last_video[wp], time);
666 /** Do our common processing on some audio */
668 Player::audio_transform (shared_ptr<AudioContent> content, AudioStreamPtr stream, ContentAudio content_audio, DCPTime time)
670 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
674 if (content->gain() != 0) {
675 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
676 gain->apply_gain (content->gain ());
677 content_audio.audio = gain;
682 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
686 if (_audio_processor) {
687 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
692 _audio_merger.push (content_audio.audio, time);
693 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
694 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
698 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
700 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
702 shared_ptr<Piece> piece = wp.lock ();
707 shared_ptr<AudioContent> content = piece->content->audio;
708 DCPOMATIC_ASSERT (content);
710 /* Compute time in the DCP */
711 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
712 /* And the end of this block in the DCP */
713 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
715 /* Remove anything that comes before the start or after the end of the content */
716 if (time < piece->content->position()) {
717 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
719 /* This audio is entirely discarded */
722 content_audio.audio = cut.first;
724 } else if (time > piece->content->end()) {
727 } else if (end > piece->content->end()) {
728 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
729 if (remaining_frames == 0) {
732 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
733 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
734 content_audio.audio = cut;
737 audio_transform (content, stream, content_audio, time);
741 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
743 shared_ptr<Piece> piece = wp.lock ();
748 /* Apply content's subtitle offsets */
749 subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
750 subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
752 /* Apply content's subtitle scale */
753 subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
754 subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
756 /* Apply a corrective translation to keep the subtitle centred after that scale */
757 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
758 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
761 ps.image.push_back (subtitle.sub);
762 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
764 _active_subtitles.add_from (wp, ps, from);
768 Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
770 shared_ptr<Piece> piece = wp.lock ();
776 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
778 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
779 s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
780 s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
781 float const xs = piece->content->subtitle->x_scale();
782 float const ys = piece->content->subtitle->y_scale();
783 float size = s.size();
785 /* Adjust size to express the common part of the scaling;
786 e.g. if xs = ys = 0.5 we scale size by 2.
788 if (xs > 1e-5 && ys > 1e-5) {
789 size *= 1 / min (1 / xs, 1 / ys);
793 /* Then express aspect ratio changes */
794 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
795 s.set_aspect_adjust (xs / ys);
798 s.set_in (dcp::Time(from.seconds(), 1000));
799 ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
800 ps.add_fonts (piece->content->subtitle->fonts ());
803 _active_subtitles.add_from (wp, ps, from);
807 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
809 if (!_active_subtitles.have (wp)) {
813 shared_ptr<Piece> piece = wp.lock ();
818 DCPTime const dcp_to = content_time_to_dcp (piece, to);
820 pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
822 if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
823 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
828 Player::seek (DCPTime time, bool accurate)
830 if (_audio_processor) {
831 _audio_processor->flush ();
834 _audio_merger.clear ();
835 _active_subtitles.clear ();
837 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
838 if (time < i->content->position()) {
839 /* Before; seek to 0 */
840 i->decoder->seek (ContentTime(), accurate);
842 } else if (i->content->position() <= time && time < i->content->end()) {
843 /* During; seek to position */
844 i->decoder->seek (dcp_to_content_time (i, time), accurate);
847 /* After; this piece is done */
853 _last_video_time = time;
854 _last_audio_time = time;
856 _last_video_time = optional<DCPTime>();
857 _last_audio_time = optional<DCPTime>();
860 _last_video.clear ();
864 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
866 optional<PositionImage> subtitles = subtitles_for_frame (time);
868 pv->set_subtitle (subtitles.get ());
873 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
874 _last_video_time = time + one_video_frame();
875 _active_subtitles.clear_before (time);
880 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
883 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
887 Player::fill_audio (DCPTimePeriod period)
889 if (period.from == period.to) {
893 DCPOMATIC_ASSERT (period.from < period.to);
895 DCPTime t = period.from;
896 while (t < period.to) {
897 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
898 Frame const samples = block.frames_round(_film->audio_frame_rate());
900 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
901 silence->make_silent ();
902 emit_audio (silence, t);
909 Player::one_video_frame () const
911 return DCPTime::from_frames (1, _film->video_frame_rate ());
914 pair<shared_ptr<AudioBuffers>, DCPTime>
915 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
917 DCPTime const discard_time = discard_to - time;
918 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
919 Frame remaining_frames = audio->frames() - discard_frames;
920 if (remaining_frames <= 0) {
921 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
923 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
924 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
925 return make_pair(cut, time + discard_time);