2 Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_subtitles.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "resampler.h"
51 #include "compose.hpp"
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <boost/foreach.hpp>
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
/* Constructor: wire the Player to its Film and Playlist.
   NOTE(review): this chunk is an elided extraction — braces and some lines
   are missing and each line carries its original line number as a prefix.
   Only comments are added here; all other tokens are untouched. */
81 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
83 , _playlist (playlist)
84 , _have_valid_pieces (false)
85 , _ignore_video (false)
86 , _ignore_audio (false)
87 , _always_burn_subtitles (false)
89 , _play_referenced (false)
90 , _last_seek_accurate (true)
91 , _audio_merger (_film->audio_frame_rate())
/* Subscribe to change notifications so our piece list / output can be
   invalidated when the film or playlist is edited. */
93 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
94 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
95 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
96 set_video_container_size (_film->frame_size ());
/* Force initial setup of the audio processor (if the film has one). */
98 film_changed (Film::AUDIO_PROCESSOR);
/* Start at the beginning with an accurate seek. */
100 seek (DCPTime (), true);
/* Rebuild the list of Pieces (content + decoder + frame-rate-change) from the
   playlist, reconnecting decoder signals and recomputing per-stream state.
   Called lazily whenever _have_valid_pieces is false. */
104 Player::setup_pieces ()
108 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files have gone missing. */
110 if (!i->paths_valid ()) {
114 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log());
115 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
118 /* Not something that we can decode; e.g. Atmos content */
/* Honour the ignore flags set by set_ignore_video() / (presumably) an audio
   equivalent — TODO confirm: the audio setter is elided from this chunk. */
122 if (decoder->video && _ignore_video) {
123 decoder->video->set_ignore ();
126 if (decoder->audio && _ignore_audio) {
127 decoder->audio->set_ignore ();
/* If we are playing referenced DCP content, tell the DCP decoder to actually
   decode it (rather than just passing the reference through). */
130 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
131 if (dcp && _play_referenced) {
132 dcp->set_decode_referenced ();
135 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
136 _pieces.push_back (piece);
/* Connect decoder outputs back into this Player; weak_ptr<Piece> avoids a
   shared_ptr cycle between Piece and the bound handlers. */
138 if (decoder->video) {
139 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
142 if (decoder->audio) {
143 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
146 if (decoder->subtitle) {
147 decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
148 decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
149 decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
/* Record the starting state of each audio stream (used to drive the merger). */
153 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
154 if (i->content->audio) {
155 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
156 _stream_states[j] = StreamState (i, i->content->position ());
/* When not playing referenced assets, note the periods covered by referenced
   DCP video/audio so we don't try to fill them with black/silence. */
161 if (!_play_referenced) {
162 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
163 shared_ptr<DCPContent> dc = dynamic_pointer_cast<DCPContent> (i->content);
165 if (dc->reference_video()) {
166 _no_video.push_back (DCPTimePeriod (dc->position(), dc->end()));
168 if (dc->reference_audio()) {
169 _no_audio.push_back (DCPTimePeriod (dc->position(), dc->end()));
/* Reset emission state; pieces are now valid. */
175 _last_video_time = optional<DCPTime> ();
176 _last_audio_time = optional<DCPTime> ();
177 _have_valid_pieces = true;
/* Handler for Playlist::ContentChanged.  Structural property changes (first
   group) invalidate the piece list; the second group of presentation-only
   properties presumably triggers a lighter refresh — TODO confirm: the code
   following the second condition is elided from this chunk. */
181 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
183 shared_ptr<Content> c = w.lock ();
/* Properties that change what/where we decode: rebuild pieces. */
189 property == ContentProperty::POSITION ||
190 property == ContentProperty::LENGTH ||
191 property == ContentProperty::TRIM_START ||
192 property == ContentProperty::TRIM_END ||
193 property == ContentProperty::PATH ||
194 property == VideoContentProperty::FRAME_TYPE ||
195 property == DCPContentProperty::NEEDS_ASSETS ||
196 property == DCPContentProperty::NEEDS_KDM ||
197 property == SubtitleContentProperty::COLOUR ||
198 property == SubtitleContentProperty::OUTLINE ||
199 property == SubtitleContentProperty::SHADOW ||
200 property == SubtitleContentProperty::EFFECT_COLOUR ||
201 property == FFmpegContentProperty::SUBTITLE_STREAM ||
202 property == VideoContentProperty::COLOUR_CONVERSION
205 _have_valid_pieces = false;
/* Properties that only change how existing decode output is presented. */
209 property == SubtitleContentProperty::LINE_SPACING ||
210 property == SubtitleContentProperty::OUTLINE_WIDTH ||
211 property == SubtitleContentProperty::Y_SCALE ||
212 property == SubtitleContentProperty::FADE_IN ||
213 property == SubtitleContentProperty::FADE_OUT ||
214 property == ContentProperty::VIDEO_FRAME_RATE ||
215 property == SubtitleContentProperty::USE ||
216 property == SubtitleContentProperty::X_OFFSET ||
217 property == SubtitleContentProperty::Y_OFFSET ||
218 property == SubtitleContentProperty::X_SCALE ||
219 property == SubtitleContentProperty::FONTS ||
220 property == VideoContentProperty::CROP ||
221 property == VideoContentProperty::SCALE ||
222 property == VideoContentProperty::FADE_IN ||
223 property == VideoContentProperty::FADE_OUT
/* Set the size of the "container" into which video will be scaled; rebuilds
   the cached black frame to match.  No-op if the size is unchanged. */
231 Player::set_video_container_size (dcp::Size s)
233 if (s == _video_container_size) {
237 _video_container_size = s;
239 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
240 _black_image->make_black ();
/* Handler for Playlist::Changed: force setup_pieces() on next use. */
246 Player::playlist_changed ()
248 _have_valid_pieces = false;
/* Handler for Film::Changed: react to film-level properties that affect
   our output. */
253 Player::film_changed (Film::Property p)
255 /* Here we should notice Film properties that affect our output, and
256 alert listeners that our output now would be different to how it was
257 last time we were run.
260 if (p == Film::CONTAINER) {
262 } else if (p == Film::VIDEO_FRAME_RATE) {
263 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
264 so we need new pieces here.
266 _have_valid_pieces = false;
268 } else if (p == Film::AUDIO_PROCESSOR) {
/* (Re)create our audio processor clone at the film's audio rate. */
269 if (_film->audio_processor ()) {
270 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
/* Scale and position image subtitles into _video_container_size coordinates,
   returning them as PositionImages ready for compositing. */
276 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
278 list<PositionImage> all;
280 for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
285 /* We will scale the subtitle up to fit _video_container_size */
286 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
288 /* Then we need a corrective translation, consisting of two parts:
290 * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
291 * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
293 * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
294 * (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
295 * (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
297 * Combining these two translations gives these expressions.
/* Convert/scale the subtitle image (call site partially elided here). */
304 dcp::YUV_TO_RGB_REC601,
305 i->image->pixel_format (),
/* Position: rectangle x/y are fractions of the container size. */
310 lrint (_video_container_size.width * i->rectangle.x),
311 lrint (_video_container_size.height * i->rectangle.y)
/* Make a PlayerVideo wrapping the cached black image, sized to the video
   container, with a default colour conversion. Used to fill video gaps. */
320 shared_ptr<PlayerVideo>
321 Player::black_player_video_frame () const
323 return shared_ptr<PlayerVideo> (
325 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
328 _video_container_size,
329 _video_container_size,
332 PresetColourConversion::all().front().conversion
/* Convert a DCP time to a frame index in the given piece's video content,
   accounting for position, trim and the piece's frame-rate change. */
338 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
340 DCPTime s = t - piece->content->position ();
341 s = min (piece->content->length_after_trim(), s);
342 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
344 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
345 then convert that ContentTime to frames at the content's rate. However this fails for
346 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
347 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
349 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
351 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: content video frame -> DCP time. */
355 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
357 /* See comment in dcp_to_content_video */
358 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
359 return max (DCPTime (), d + piece->content->position ());
/* Convert a DCP time to a frame count at the film's audio sample rate,
   relative to the start of the piece's (resampled) audio. */
363 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
365 DCPTime s = t - piece->content->position ();
366 s = min (piece->content->length_after_trim(), s);
367 /* See notes in dcp_to_content_video */
368 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: resampled audio frame -> DCP time. */
372 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
374 /* See comment in dcp_to_content_video */
375 DCPTime const d = DCPTime::from_frames (f, _film->audio_frame_rate()) - DCPTime (piece->content->trim_start(), piece->frc);
376 return max (DCPTime (), d + piece->content->position ());
/* Convert a DCP time to a ContentTime within the piece (position and
   trim_start applied), clamped to the piece's trimmed length. */
380 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
382 DCPTime s = t - piece->content->position ();
383 s = min (piece->content->length_after_trim(), s);
384 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Inverse of dcp_to_content_time: ContentTime in a piece -> DCP time. */
388 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
390 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by all subtitle content in the playlist,
   rebuilding pieces first if they are stale. */
393 list<shared_ptr<Font> >
394 Player::get_subtitle_fonts ()
396 if (!_have_valid_pieces) {
400 list<shared_ptr<Font> > fonts;
401 BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
402 if (p->content->subtitle) {
403 /* XXX: things may go wrong if there are duplicate font IDs
404 with different font files.
406 list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
407 copy (f.begin(), f.end(), back_inserter (fonts));
414 /** Set this player never to produce any video data */
416 Player::set_ignore_video ()
418 _ignore_video = true;
421 /** Set whether or not this player should always burn text subtitles into the image,
422 * regardless of the content settings.
423 * @param burn true to always burn subtitles, false to obey content settings.
426 Player::set_always_burn_subtitles (bool burn)
428 _always_burn_subtitles = burn;
/* NOTE(review): the line below (original line 435) belongs to a different,
   elided setter (its header is missing from this extraction) — it is not
   part of set_always_burn_subtitles. */
435 _have_valid_pieces = false;
/* Make the player decode content that a DCP merely references, and
   invalidate pieces so decoders are rebuilt accordingly. */
439 Player::set_play_referenced ()
441 _play_referenced = true;
442 _have_valid_pieces = false;
/* Gather the reel assets (picture/sound/subtitle) that referenced DCP content
   contributes, adjusting each asset's entry point and duration for trims and
   computing the DCP-time period it occupies in the film. */
445 list<ReferencedReelAsset>
446 Player::get_reel_assets ()
448 list<ReferencedReelAsset> a;
450 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can be referenced. */
451 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
456 scoped_ptr<DCPDecoder> decoder;
458 decoder.reset (new DCPDecoder (j, _film->log()));
464 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
/* Trims are expressed in frames at the content's (DCP's) own rate. */
466 DCPOMATIC_ASSERT (j->video_frame_rate ());
467 double const cfr = j->video_frame_rate().get();
468 Frame const trim_start = j->trim_start().frames_round (cfr);
469 Frame const trim_end = j->trim_end().frames_round (cfr);
470 int const ffr = _film->video_frame_rate ();
/* `offset' accumulates reel lengths across the loop (its declaration is
   elided from this extraction). */
472 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
473 if (j->reference_video ()) {
474 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
475 DCPOMATIC_ASSERT (ra);
476 ra->set_entry_point (ra->entry_point() + trim_start);
477 ra->set_duration (ra->duration() - trim_start - trim_end);
479 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
483 if (j->reference_audio ()) {
484 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
485 DCPOMATIC_ASSERT (ra);
486 ra->set_entry_point (ra->entry_point() + trim_start);
487 ra->set_duration (ra->duration() - trim_start - trim_end);
489 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
493 if (j->reference_subtitle ()) {
494 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
495 DCPOMATIC_ASSERT (ra);
496 ra->set_entry_point (ra->entry_point() + trim_start);
497 ra->set_duration (ra->duration() - trim_start - trim_end);
499 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
503 /* Assume that main picture duration is the length of the reel */
504 offset += k->main_picture()->duration ();
/* Body fragment of (presumably) Player::pass() — the function header is
   elided from this extraction.  Drives one step of decoding: pick the piece
   whose next output is earliest, pass() its decoder, fill video/audio gaps
   with black/silence, and emit any merged audio that is ready. */
514 if (!_have_valid_pieces) {
/* Find the piece with the earliest pending content time. */
518 shared_ptr<Piece> earliest;
519 DCPTime earliest_content;
521 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
523 DCPTime const t = i->content->position() + DCPTime (i->decoder->position(), i->frc);
524 if (!earliest || t < earliest_content) {
525 earliest_content = t;
532 earliest->done = earliest->decoder->pass ();
533 if (earliest->done && earliest->content->audio) {
534 /* Flush the Player audio system for this piece */
535 BOOST_FOREACH (AudioStreamPtr i, earliest->content->audio->streams()) {
536 audio_flush (earliest, i);
541 /* Fill towards the next thing that might happen (or the end of the playlist). This is to fill gaps between content,
542 NOT to fill gaps within content (the latter is done in ::video())
544 DCPTime fill_towards = earliest ? earliest_content : _playlist->length();
546 /* Work out where to fill video from */
547 optional<DCPTime> video_fill_from;
548 if (_last_video_time && !_playlist->video_content_at(*_last_video_time)) {
549 /* No seek; fill from the last video time */
550 video_fill_from = _last_video_time;
551 } else if (_last_seek_time && !_playlist->video_content_at(*_last_seek_time)) {
552 /* Seek into an empty area; fill from the seek time */
553 video_fill_from = _last_seek_time;
557 /* Fill some black if we would emit before the earliest piece of content. This is so we act like a phantom
558 Piece which emits black in spaces (we only emit if we are the earliest thing)
560 if (earliest && video_fill_from && *video_fill_from < earliest_content && ((fill_towards - *video_fill_from)) >= one_video_frame()) {
561 emit_video (black_player_video_frame(), *video_fill_from);
563 } else if (_playlist->length() == DCPTime()) {
564 /* Special case of an empty Film; just give one black frame */
565 emit_video (black_player_video_frame(), DCPTime());
/* Same idea for audio: decide where silence-fill should start from. */
569 optional<DCPTime> audio_fill_from;
570 if (_last_audio_time && !_playlist->audio_content_at(*_last_audio_time)) {
571 /* No seek; fill from the last thing that happened */
572 audio_fill_from = _last_audio_time;
573 } else if (_last_seek_time && !_playlist->audio_content_at(*_last_seek_time)) {
574 /* Seek into an empty area; fill from the seek time */
575 audio_fill_from = _last_seek_time;
578 if (audio_fill_from && audio_fill_from < fill_towards) {
579 DCPTimePeriod period (*audio_fill_from, fill_towards);
/* Cap each fill at one video frame's worth so we interleave with decoding. */
580 if (period.duration() > one_video_frame()) {
581 period.to = period.from + one_video_frame();
587 /* Emit any audio that is ready */
/* Only pull up to the point every live stream has pushed to. */
589 DCPTime pull_to = _playlist->length ();
590 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
591 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
592 pull_to = i->second.last_push_end;
596 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
597 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
598 if (_last_audio_time && i->second < *_last_audio_time) {
599 /* There has been an accurate seek and we have received some audio before the seek time;
602 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
/* Fill any gap between the last emitted audio and this block. */
609 if (_last_audio_time) {
610 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
613 emit_audio (i->first, i->second);
/* Return true when nothing was decoded and nothing was filled (i.e. done). */
616 return !earliest && !filled;
/* Build the composited subtitle overlay (image subs + rendered text subs)
   for the frame at `time', or none if there is nothing to burn. */
619 optional<PositionImage>
620 Player::subtitles_for_frame (DCPTime time) const
622 list<PositionImage> subtitles;
624 BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
626 /* Image subtitles */
627 list<PositionImage> c = transform_image_subtitles (i.image);
628 copy (c.begin(), c.end(), back_inserter (subtitles));
630 /* Text subtitles (rendered to an image) */
631 if (!i.text.empty ()) {
632 list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
633 copy (s.begin(), s.end(), back_inserter (subtitles));
637 if (subtitles.empty ()) {
638 return optional<PositionImage> ();
641 return merge (subtitles);
/* Handler for decoder video output: convert the content frame to DCP time,
   discard out-of-range frames, fill any gap since the last emission, then
   wrap the frame in a PlayerVideo and emit it. */
645 Player::video (weak_ptr<Piece> wp, ContentVideo video)
647 shared_ptr<Piece> piece = wp.lock ();
/* With a skip frame-rate-change, drop every other content frame. */
652 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
653 if (frc.skip && (video.frame % 2) == 1) {
657 /* Time and period of the frame we will emit */
658 DCPTime const time = content_video_to_dcp (piece, video.frame);
659 DCPTimePeriod const period (time, time + one_video_frame());
661 /* Discard if it's outside the content's period or if it's before the last accurate seek */
663 time < piece->content->position() ||
664 time >= piece->content->end() ||
665 (_last_seek_time && _last_seek_accurate && time < *_last_seek_time)) {
669 /* Fill gaps caused by (the hopefully rare event of) a decoder not emitting contiguous video. We have to do this here
670 as in the problematic case we are about to emit a frame which is not contiguous with the previous.
673 optional<DCPTime> fill_to;
674 if (_last_video_time) {
675 fill_to = _last_video_time;
676 } else if (_last_seek_time && _last_seek_accurate) {
677 fill_to = _last_seek_time;
681 /* XXX: this may not work for 3D */
/* Skip periods covered by referenced DCP video (_no_video). */
682 BOOST_FOREACH (DCPTimePeriod i, subtract(DCPTimePeriod (*fill_to, time), _no_video)) {
683 for (DCPTime j = i.from; j < i.to; j += one_video_frame()) {
/* Repeat the last frame if we have one, otherwise emit black. */
685 emit_video (shared_ptr<PlayerVideo> (new PlayerVideo (*_last_video)), j);
687 emit_video (black_player_video_frame(), j);
/* Build the PlayerVideo for this frame (constructor call partially elided). */
696 piece->content->video->crop (),
697 piece->content->video->fade (video.frame),
698 piece->content->video->scale().size (
699 piece->content->video, _video_container_size, _film->frame_size ()
701 _video_container_size,
704 piece->content->video->colour_conversion ()
708 emit_video (_last_video, time);
/* Flush any samples still buffered in the resampler for this piece/stream
   and push them through the normal audio_transform path. */
712 Player::audio_flush (shared_ptr<Piece> piece, AudioStreamPtr stream)
714 shared_ptr<AudioContent> content = piece->content->audio;
715 DCPOMATIC_ASSERT (content);
/* `false' = don't create a resampler if one doesn't already exist. */
717 shared_ptr<Resampler> r = resampler (content, stream, false);
722 pair<shared_ptr<const AudioBuffers>, Frame> ro = r->flush ();
723 if (ro.first->frames() == 0) {
727 ContentAudio content_audio;
728 content_audio.audio = ro.first;
729 content_audio.frame = ro.second;
731 /* Compute time in the DCP */
732 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
734 audio_transform (content, stream, content_audio, time);
737 /** Do our common processing on some audio */
739 Player::audio_transform (shared_ptr<AudioContent> content, AudioStreamPtr stream, ContentAudio content_audio, DCPTime time)
741 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting (copy-on-write: only copy if needed). */
745 if (content->gain() != 0) {
746 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
747 gain->apply_gain (content->gain ());
748 content_audio.audio = gain;
/* Remap the stream's channels onto the DCP channel layout, accumulating
   weighted contributions per the stream's AudioMapping. */
753 shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), content_audio.audio->frames()));
754 dcp_mapped->make_silent ();
756 AudioMapping map = stream->mapping ();
757 for (int i = 0; i < map.input_channels(); ++i) {
758 for (int j = 0; j < dcp_mapped->channels(); ++j) {
759 if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
760 dcp_mapped->accumulate_channel (
761 content_audio.audio.get(),
763 static_cast<dcp::Channel> (j),
764 map.get (i, static_cast<dcp::Channel> (j))
770 content_audio.audio = dcp_mapped;
/* Run the film's audio processor, if any (e.g. upmixing). */
774 if (_audio_processor) {
775 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push into the merger and record how far this stream has been pushed. */
780 _audio_merger.push (content_audio.audio, time);
781 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
782 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for decoder audio output: resample if needed, compute the DCP
   time, pad delay-induced gaps, trim to the content's period, then hand
   off to audio_transform(). */
786 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
788 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
790 shared_ptr<Piece> piece = wp.lock ();
795 shared_ptr<AudioContent> content = piece->content->audio;
796 DCPOMATIC_ASSERT (content);
/* Resample if the stream's rate differs from the content's target rate;
   `true' = create the resampler on first use. */
799 if (stream->frame_rate() != content->resampled_frame_rate()) {
800 shared_ptr<Resampler> r = resampler (content, stream, true);
801 pair<shared_ptr<const AudioBuffers>, Frame> ro = r->run (content_audio.audio, content_audio.frame);
802 if (ro.first->frames() == 0) {
805 content_audio.audio = ro.first;
806 content_audio.frame = ro.second;
809 /* Compute time in the DCP */
810 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
811 /* And the end of this block in the DCP */
812 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
814 /* Pad any gap which may be caused by audio delay */
815 if (_last_audio_time) {
816 fill_audio (DCPTimePeriod (*_last_audio_time, time));
817 } else if (_last_seek_time && _last_seek_accurate) {
818 fill_audio (DCPTimePeriod (*_last_seek_time, time));
821 /* Remove anything that comes before the start or after the end of the content */
822 if (time < piece->content->position()) {
823 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
825 /* This audio is entirely discarded */
828 content_audio.audio = cut.first;
830 } else if (time > piece->content->end()) {
/* Starts after the content's end: drop it entirely (body elided here). */
833 } else if (end > piece->content->end()) {
/* Straddles the end: keep only the frames up to the content's end. */
834 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
835 if (remaining_frames == 0) {
838 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
839 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
840 content_audio.audio = cut;
843 audio_transform (content, stream, content_audio, time);
/* Handler for a decoder's ImageStart signal: apply the content's subtitle
   offset/scale settings and register the subtitle as active from its
   DCP start time. */
847 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
849 shared_ptr<Piece> piece = wp.lock ();
854 /* Apply content's subtitle offsets */
855 subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
856 subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
858 /* Apply content's subtitle scale */
859 subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
860 subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
862 /* Apply a corrective translation to keep the subtitle centred after that scale */
863 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
864 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
867 ps.image.push_back (subtitle.sub);
868 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
870 _active_subtitles.add_from (wp, ps, from);
/* Handler for a decoder's TextStart signal: apply the content's offset,
   scale and aspect settings to each SubtitleString, then register the
   resulting subtitles as active from their DCP start time. */
874 Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
876 shared_ptr<Piece> piece = wp.lock ();
882 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
884 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
885 s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
886 s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
887 float const xs = piece->content->subtitle->x_scale();
888 float const ys = piece->content->subtitle->y_scale();
889 float size = s.size();
891 /* Adjust size to express the common part of the scaling;
892 e.g. if xs = ys = 0.5 we scale size by 2.
894 if (xs > 1e-5 && ys > 1e-5) {
895 size *= 1 / min (1 / xs, 1 / ys);
899 /* Then express aspect ratio changes */
900 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
901 s.set_aspect_adjust (xs / ys);
/* dcp::Time here takes (seconds, editable-rate); 1000 is the tcr used. */
904 s.set_in (dcp::Time(from.seconds(), 1000));
905 ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
906 ps.add_fonts (piece->content->subtitle->fonts ());
909 _active_subtitles.add_from (wp, ps, from);
/* Handler for a decoder's Stop signal: close off the active subtitle for
   this piece at the given time and, if the subtitle is not being burnt in,
   emit it to listeners via the Subtitle signal. */
913 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
915 if (!_active_subtitles.have (wp)) {
919 shared_ptr<Piece> piece = wp.lock ();
924 DCPTime const dcp_to = content_time_to_dcp (piece, to);
926 pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
928 if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
929 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
/* Seek all decoders to `time'.  @param accurate true to seek exactly (frames
   before `time' will be discarded on output), false for a fast/approximate
   seek.  Flushes resamplers/processor and clears merged audio and subtitles. */
934 Player::seek (DCPTime time, bool accurate)
936 if (_audio_processor) {
937 _audio_processor->flush ();
940 for (ResamplerMap::iterator i = _resamplers.begin(); i != _resamplers.end(); ++i) {
945 _audio_merger.clear ();
946 _active_subtitles.clear ();
948 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
949 if (time < i->content->position()) {
950 /* Before; seek to 0 */
951 i->decoder->seek (ContentTime(), accurate);
953 } else if (i->content->position() <= time && time < i->content->end()) {
954 /* During; seek to position */
955 i->decoder->seek (dcp_to_content_time (i, time), accurate);
958 /* After; this piece is done */
/* Reset emission bookkeeping so pass()/video()/audio() fill from here. */
963 _last_video_time = optional<DCPTime> ();
964 _last_audio_time = optional<DCPTime> ();
965 _last_seek_time = time;
966 _last_seek_accurate = accurate;
/* Fetch the cached resampler for (content, stream), creating one (from the
   stream's rate to the content's resampled rate) if `create' is true;
   returns a null pointer if none exists and `create' is false. */
969 shared_ptr<Resampler>
970 Player::resampler (shared_ptr<const AudioContent> content, AudioStreamPtr stream, bool create)
972 ResamplerMap::const_iterator i = _resamplers.find (make_pair (content, stream));
973 if (i != _resamplers.end ()) {
978 return shared_ptr<Resampler> ();
/* Log creation (LOG_GENERAL call partially elided in this extraction). */
982 "Creating new resampler from %1 to %2 with %3 channels",
983 stream->frame_rate(),
984 content->resampled_frame_rate(),
988 shared_ptr<Resampler> r (
989 new Resampler (stream->frame_rate(), content->resampled_frame_rate(), stream->channels())
992 _resamplers[make_pair(content, stream)] = r;
/* Burn any due subtitles into the frame, emit it, advance _last_video_time
   by one frame and drop subtitles that have finished. */
997 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
999 optional<PositionImage> subtitles = subtitles_for_frame (time);
1001 pv->set_subtitle (subtitles.get ());
1004 _last_video_time = time + one_video_frame();
1005 _active_subtitles.clear_before (time);
/* Emit an audio block and advance _last_audio_time past it. */
1009 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1012 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate ());
/* Emit silence over `period', skipping spans covered by referenced DCP audio
   (_no_audio), in blocks of at most half a second. */
1016 Player::fill_audio (DCPTimePeriod period)
1018 BOOST_FOREACH (DCPTimePeriod i, subtract(period, _no_audio)) {
1021 DCPTime block = min (DCPTime::from_seconds (0.5), i.to - t);
1022 Frame const samples = block.frames_round(_film->audio_frame_rate());
1024 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1025 silence->make_silent ();
1026 emit_audio (silence, t);
/* The duration of one video frame at the film's video frame rate. */
1034 Player::one_video_frame () const
1036 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Drop the samples of `audio' (which starts at `time') that fall before
   `discard_to'.  Returns the remaining samples and their new start time;
   returns a null buffer if everything is discarded. */
1039 pair<shared_ptr<AudioBuffers>, DCPTime>
1040 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1042 DCPTime const discard_time = discard_to - time;
1043 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1044 Frame remaining_frames = audio->frames() - discard_frames;
1045 if (remaining_frames <= 0) {
1046 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1048 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1049 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1050 return make_pair(cut, time + discard_time);