2 Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_subtitles.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "resampler.h"
51 #include "compose.hpp"
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <boost/foreach.hpp>
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
/* Construct a Player for a film/playlist pair.  Wires up change notifications
   from both the Film and the Playlist, sizes the output to the film's frame
   size, primes the audio processor state and seeks to time zero.
   NOTE(review): this extract omits some lines (e.g. the opening brace and at
   least one member initialiser) — confirm against the full source. */
81 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
83 , _playlist (playlist)
84 , _have_valid_pieces (false)
85 , _ignore_video (false)
86 , _ignore_audio (false)
87 , _always_burn_subtitles (false)
89 , _play_referenced (false)
90 , _audio_merger (_film->audio_frame_rate())
/* React to changes in the film, the playlist, or individual content items */
92 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
93 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
94 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
95 set_video_container_size (_film->frame_size ());
/* Pick up any audio processor configured on the film */
97 film_changed (Film::AUDIO_PROCESSOR);
/* Accurate seek to the start so the first pass() emits from time 0 */
99 seek (DCPTime (), true);
/* (Re)build the list of Pieces (content + decoder + frame-rate-change) from
   the playlist, connect decoder output signals to our handlers, record
   per-stream audio state, and note the periods covered by referenced DCP
   video/audio so fill_video()/fill_audio() can skip them.
   NOTE(review): loop-closing braces and some guard bodies are not visible in
   this extract. */
103 Player::setup_pieces ()
107 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files are missing/invalid */
109 if (!i->paths_valid ()) {
113 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log());
114 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
117 /* Not something that we can decode; e.g. Atmos content */
/* Honour the ignore flags set via set_ignore_video()/set_ignore_audio() */
121 if (decoder->video && _ignore_video) {
122 decoder->video->set_ignore ();
125 if (decoder->audio && _ignore_audio) {
126 decoder->audio->set_ignore ();
/* DCP decoders must decode referenced assets when we are playing them */
129 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
130 if (dcp && _play_referenced) {
131 dcp->set_decode_referenced ();
134 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
135 _pieces.push_back (piece);
/* weak_ptr so a queued signal cannot keep a dead Piece alive */
137 if (decoder->video) {
138 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
141 if (decoder->audio) {
142 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
145 if (decoder->subtitle) {
146 decoder->subtitle->ImageData.connect (bind (&Player::image_subtitle, this, weak_ptr<Piece> (piece), _1));
147 decoder->subtitle->TextData.connect (bind (&Player::text_subtitle, this, weak_ptr<Piece> (piece), _1));
/* One StreamState per audio stream, starting at its content's position */
151 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
152 if (i->content->audio) {
153 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
154 _stream_states[j] = StreamState (i, i->content->position ());
/* When not playing referenced assets, remember where referenced DCP content
   supplies video/audio so we do not fill those periods with black/silence */
159 if (!_play_referenced) {
160 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
161 shared_ptr<DCPContent> dc = dynamic_pointer_cast<DCPContent> (i->content);
163 if (dc->reference_video()) {
164 _no_video.push_back (DCPTimePeriod (dc->position(), dc->end()));
166 if (dc->reference_audio()) {
167 _no_audio.push_back (DCPTimePeriod (dc->position(), dc->end()));
/* Reset emission clocks; pieces are now valid */
173 _last_video_time = optional<DCPTime> ();
174 _last_audio_time = optional<DCPTime> ();
175 _have_valid_pieces = true;
/* Handle a change to a property of some content in the playlist.
   Properties in the first group invalidate the pieces (decoders must be
   rebuilt); the second group only changes how output looks, so — presumably —
   it just triggers a Changed notification (the branch bodies' surrounding
   lines are not visible in this extract; confirm against the full source). */
179 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
181 shared_ptr<Content> c = w.lock ();
/* Changes that require setup_pieces() to run again */
187 property == ContentProperty::POSITION ||
188 property == ContentProperty::LENGTH ||
189 property == ContentProperty::TRIM_START ||
190 property == ContentProperty::TRIM_END ||
191 property == ContentProperty::PATH ||
192 property == VideoContentProperty::FRAME_TYPE ||
193 property == DCPContentProperty::NEEDS_ASSETS ||
194 property == DCPContentProperty::NEEDS_KDM ||
195 property == SubtitleContentProperty::COLOUR ||
196 property == SubtitleContentProperty::OUTLINE ||
197 property == SubtitleContentProperty::SHADOW ||
198 property == SubtitleContentProperty::EFFECT_COLOUR ||
199 property == FFmpegContentProperty::SUBTITLE_STREAM ||
200 property == VideoContentProperty::COLOUR_CONVERSION
203 _have_valid_pieces = false;
/* Changes that alter the output but do not require new pieces */
207 property == SubtitleContentProperty::LINE_SPACING ||
208 property == SubtitleContentProperty::OUTLINE_WIDTH ||
209 property == SubtitleContentProperty::Y_SCALE ||
210 property == SubtitleContentProperty::FADE_IN ||
211 property == SubtitleContentProperty::FADE_OUT ||
212 property == ContentProperty::VIDEO_FRAME_RATE ||
213 property == SubtitleContentProperty::USE ||
214 property == SubtitleContentProperty::X_OFFSET ||
215 property == SubtitleContentProperty::Y_OFFSET ||
216 property == SubtitleContentProperty::X_SCALE ||
217 property == SubtitleContentProperty::FONTS ||
218 property == VideoContentProperty::CROP ||
219 property == VideoContentProperty::SCALE ||
220 property == VideoContentProperty::FADE_IN ||
221 property == VideoContentProperty::FADE_OUT
/* Set the size of the "container" that video output is scaled/padded into,
   and rebuild the cached black frame at that size.  No-op when unchanged. */
229 Player::set_video_container_size (dcp::Size s)
231 if (s == _video_container_size) {
235 _video_container_size = s;
/* Cached frame used by fill_video() / black_player_video_frame() */
237 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
238 _black_image->make_black ();
/* Playlist content list changed: pieces must be rebuilt on next use. */
244 Player::playlist_changed ()
246 _have_valid_pieces = false;
/* Handle a change to a Film property that affects our output. */
251 Player::film_changed (Film::Property p)
253 /* Here we should notice Film properties that affect our output, and
254 alert listeners that our output now would be different to how it was
255 last time we were run.
/* Container change: output geometry differs but pieces stay valid */
258 if (p == Film::CONTAINER) {
260 } else if (p == Film::VIDEO_FRAME_RATE) {
261 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
262 so we need new pieces here.
264 _have_valid_pieces = false;
/* Re-clone the film's audio processor at the film's audio rate */
266 } else if (p == Film::AUDIO_PROCESSOR) {
267 if (_film->audio_processor ()) {
268 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
/* Scale image subtitles up to _video_container_size and position them,
   returning a list of PositionImages ready for merging onto video.
   NOTE(review): the scale/convert call itself is only partially visible in
   this extract. */
274 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
276 list<PositionImage> all;
278 for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
283 /* We will scale the subtitle up to fit _video_container_size */
/* rectangle is in proportional (0..1) units, hence the multiply */
284 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
286 /* Then we need a corrective translation, consisting of two parts:
288 * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
289 * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
291 * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
292 * (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
293 * (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
295 * Combining these two translations gives these expressions.
302 dcp::YUV_TO_RGB_REC601,
303 i->image->pixel_format (),
/* Round to integer pixel positions in the container */
308 lrint (_video_container_size.width * i->rectangle.x),
309 lrint (_video_container_size.height * i->rectangle.y)
/* Build a PlayerVideo wrapping the cached black image, sized to the
   container, with a default colour conversion.  Used to fill gaps where
   there is no video content. */
318 shared_ptr<PlayerVideo>
319 Player::black_player_video_frame () const
321 return shared_ptr<PlayerVideo> (
323 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
326 _video_container_size,
327 _video_container_size,
330 PresetColourConversion::all().front().conversion
/* Convert a DCP time to a video frame index within a piece's content,
   accounting for position, trim and the piece's frame-rate change. */
336 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
338 DCPTime s = t - piece->content->position ();
/* Clamp into [0, length_after_trim], then add back the trimmed start */
339 s = min (piece->content->length_after_trim(), s);
340 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
342 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
343 then convert that ContentTime to frames at the content's rate. However this fails for
344 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
345 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
347 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
349 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: content video frame -> DCP time. */
353 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
355 /* See comment in dcp_to_content_video */
356 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
/* Never return a time before the start of the DCP */
357 return max (DCPTime (), d + piece->content->position ());
/* Convert a DCP time to a frame index in a piece's resampled audio
   (i.e. audio at the film's audio frame rate). */
361 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
363 DCPTime s = t - piece->content->position ();
364 s = min (piece->content->length_after_trim(), s);
365 /* See notes in dcp_to_content_video */
366 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio: resampled audio frame -> DCP time. */
370 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
372 /* See comment in dcp_to_content_video */
373 DCPTime const d = DCPTime::from_frames (f, _film->audio_frame_rate()) - DCPTime (piece->content->trim_start(), piece->frc);
374 return max (DCPTime (), d + piece->content->position ());
/* Convert a DCP time to a ContentTime within a piece, clamped to the
   piece's trimmed length and offset by its trim start. */
378 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
380 DCPTime s = t - piece->content->position ();
381 s = min (piece->content->length_after_trim(), s);
382 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Inverse of dcp_to_content_time: ContentTime within a piece -> DCP time. */
386 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
388 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by all subtitle content in the playlist.
   NOTE(review): the setup_pieces() call implied by the !_have_valid_pieces
   guard is not visible in this extract. */
391 list<shared_ptr<Font> >
392 Player::get_subtitle_fonts ()
394 if (!_have_valid_pieces) {
398 list<shared_ptr<Font> > fonts;
399 BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
400 if (p->content->subtitle) {
401 /* XXX: things may go wrong if there are duplicate font IDs
402 with different font files.
404 list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
405 copy (f.begin(), f.end(), back_inserter (fonts));
412 /** Set this player never to produce any video data */
414 Player::set_ignore_video ()
416 _ignore_video = true;
419 /** Set this player never to produce any audio data */
421 Player::set_ignore_audio ()
423 _ignore_audio = true;
426 /** Set whether or not this player should always burn text subtitles into the image,
427 * regardless of the content settings.
428 * @param burn true to always burn subtitles, false to obey content settings.
431 Player::set_always_burn_subtitles (bool burn)
433 _always_burn_subtitles = burn;
/* NOTE(review): this extract skips to line 440; this invalidation appears to
   belong to a following setter whose signature is not visible here. */
440 _have_valid_pieces = false;
/* Make the player decode and play assets that DCP content references
   (rather than skipping them); requires pieces to be rebuilt. */
444 Player::set_play_referenced ()
446 _play_referenced = true;
447 _have_valid_pieces = false;
/* Gather the reel assets (picture/sound/subtitle) that DCP content in the
   playlist references, with their trim applied and their DCP-time extents
   computed.  Used when building a DCP that references another DCP. */
450 list<ReferencedReelAsset>
451 Player::get_reel_assets ()
453 list<ReferencedReelAsset> a;
455 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can reference reel assets */
456 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
461 scoped_ptr<DCPDecoder> decoder;
463 decoder.reset (new DCPDecoder (j, _film->log()));
469 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
/* cfr = content frame rate, ffr = film frame rate; trims are in content frames */
471 DCPOMATIC_ASSERT (j->video_frame_rate ());
472 double const cfr = j->video_frame_rate().get();
473 Frame const trim_start = j->trim_start().frames_round (cfr);
474 Frame const trim_end = j->trim_end().frames_round (cfr);
475 int const ffr = _film->video_frame_rate ();
/* `offset' accumulates reel lengths so each reel's assets start at the right time */
477 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
478 if (j->reference_video ()) {
479 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
480 DCPOMATIC_ASSERT (ra);
/* Apply our trim to the asset's entry point / duration */
481 ra->set_entry_point (ra->entry_point() + trim_start);
482 ra->set_duration (ra->duration() - trim_start - trim_end);
484 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
488 if (j->reference_audio ()) {
489 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
490 DCPOMATIC_ASSERT (ra);
491 ra->set_entry_point (ra->entry_point() + trim_start);
492 ra->set_duration (ra->duration() - trim_start - trim_end);
494 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
498 if (j->reference_subtitle ()) {
499 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
500 DCPOMATIC_ASSERT (ra);
501 ra->set_entry_point (ra->entry_point() + trim_start);
502 ra->set_duration (ra->duration() - trim_start - trim_end);
504 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
508 /* Assume that main picture duration is the length of the reel */
509 offset += k->main_picture()->duration ();
/* Return the pieces whose content overlaps [from, to) and satisfies the
   given predicate on the raw Content pointer. */
516 list<shared_ptr<Piece> >
517 Player::overlaps (DCPTime from, DCPTime to, boost::function<bool (Content *)> valid)
519 if (!_have_valid_pieces) {
523 list<shared_ptr<Piece> > overlaps;
524 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Half-open interval overlap test on content position/end */
525 if (valid (i->content.get ()) && i->content->position() < to && i->content->end() > from) {
526 overlaps.push_back (i);
/* NOTE(review): the enclosing function's signature (presumably Player::pass)
   falls outside this extract; the logic below drives one decode pass:
   pick the piece whose decoder is earliest in DCP time, pass() it, then
   emit any audio that every active stream has advanced beyond. */
536 if (!_have_valid_pieces) {
540 shared_ptr<Piece> earliest;
541 DCPTime earliest_content;
543 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Decoder position is in content time; map it into DCP time */
545 DCPTime const t = i->content->position() + DCPTime (i->decoder->position(), i->frc);
546 if (!earliest || t < earliest_content) {
547 earliest_content = t;
554 /* No more content; fill up with silent black */
555 DCPTimePeriod remaining_video (DCPTime(), _playlist->length());
556 if (_last_video_time) {
557 remaining_video.from = _last_video_time.get() + one_video_frame();
559 fill_video (remaining_video);
560 DCPTimePeriod remaining_audio (DCPTime(), _playlist->length());
561 if (_last_audio_time) {
562 remaining_audio.from = _last_audio_time.get();
564 fill_audio (remaining_audio);
568 earliest->done = earliest->decoder->pass ();
569 if (earliest->done && earliest->content->audio) {
570 /* Flush the Player audio system for this piece */
571 BOOST_FOREACH (AudioStreamPtr i, earliest->content->audio->streams()) {
572 audio_flush (earliest, i);
576 /* Emit any audio that is ready */
/* pull_from = the earliest point no unfinished stream has reached yet */
578 DCPTime pull_from = _playlist->length ();
579 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
580 if (!i->second.piece->done && i->second.last_push_end < pull_from) {
581 pull_from = i->second.last_push_end;
585 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_from);
586 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
587 if (_last_audio_time && i->second < _last_audio_time.get()) {
588 /* There has been an accurate seek and we have received some audio before the seek time;
591 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
/* Pad any gap between the last emitted audio and this block with silence */
598 if (_last_audio_time) {
599 fill_audio (DCPTimePeriod (_last_audio_time.get(), i->second));
602 Audio (i->first, i->second);
603 _last_audio_time = i->second + DCPTime::from_frames(i->first->frames(), _film->audio_frame_rate());
/* Handler for decoded video from a piece: convert to DCP time, discard
   out-of-range frames, overlay any active subtitles, fill gaps since the
   last frame, then emit via the Video signal.
   NOTE(review): several statements (e.g. the PlayerVideo construction head)
   are only partially visible in this extract. */
610 Player::video (weak_ptr<Piece> wp, ContentVideo video)
612 shared_ptr<Piece> piece = wp.lock ();
/* For skip frame-rate changes, drop every other content frame */
617 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
618 if (frc.skip && (video.frame % 2) == 1) {
622 /* Time and period of the frame we will emit */
623 DCPTime const time = content_video_to_dcp (piece, video.frame);
624 DCPTimePeriod const period (time, time + one_video_frame());
626 /* Discard if it's outside the content's period */
627 if (time < piece->content->position() || time >= piece->content->end()) {
631 /* Get any subtitles */
633 optional<PositionImage> subtitles;
635 for (list<pair<PlayerSubtitles, DCPTimePeriod> >::const_iterator i = _subtitles.begin(); i != _subtitles.end(); ++i) {
/* Skip subtitles that do not overlap this frame's period */
637 if (!i->second.overlap (period)) {
641 list<PositionImage> sub_images;
643 /* Image subtitles */
644 list<PositionImage> c = transform_image_subtitles (i->first.image);
645 copy (c.begin(), c.end(), back_inserter (sub_images));
647 /* Text subtitles (rendered to an image) */
648 if (!i->first.text.empty ()) {
649 list<PositionImage> s = render_subtitles (i->first.text, i->first.fonts, _video_container_size, time);
650 copy (s.begin (), s.end (), back_inserter (sub_images));
653 if (!sub_images.empty ()) {
654 subtitles = merge (sub_images);
/* Fill any gap between the previously-emitted frame and this one */
660 if (_last_video_time) {
661 fill_video (DCPTimePeriod (_last_video_time.get() + one_video_frame(), time));
/* Build the PlayerVideo from the content's crop/fade/scale settings */
667 piece->content->video->crop (),
668 piece->content->video->fade (video.frame),
669 piece->content->video->scale().size (
670 piece->content->video, _video_container_size, _film->frame_size ()
672 _video_container_size,
675 piece->content->video->colour_conversion ()
680 _last_video->set_subtitle (subtitles.get ());
683 _last_video_time = time;
685 Video (_last_video, *_last_video_time);
687 /* Discard any subtitles we no longer need */
689 for (list<pair<PlayerSubtitles, DCPTimePeriod> >::iterator i = _subtitles.begin (); i != _subtitles.end(); ) {
/* Keep a copy of the iterator as erase() invalidates it */
690 list<pair<PlayerSubtitles, DCPTimePeriod> >::iterator tmp = i;
693 if (i->second.to < time) {
694 _subtitles.erase (i);
/* Flush any audio still buffered in a stream's resampler (if any) and push
   the result through the normal audio_transform() path. */
702 Player::audio_flush (shared_ptr<Piece> piece, AudioStreamPtr stream)
704 shared_ptr<AudioContent> content = piece->content->audio;
705 DCPOMATIC_ASSERT (content);
/* create=false: only flush a resampler that already exists */
707 shared_ptr<Resampler> r = resampler (content, stream, false);
712 pair<shared_ptr<const AudioBuffers>, Frame> ro = r->flush ();
713 if (ro.first->frames() == 0) {
717 ContentAudio content_audio;
718 content_audio.audio = ro.first;
719 content_audio.frame = ro.second;
721 /* Compute time in the DCP */
/* content->delay() is in milliseconds */
722 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
724 audio_transform (content, stream, content_audio, time);
727 /** Do our common processing on some audio */
/* Applies content gain, remaps stream channels into the DCP channel layout,
   runs any film audio processor, then pushes the result into the merger and
   updates the stream's last_push_end bookkeeping. */
729 Player::audio_transform (shared_ptr<AudioContent> content, AudioStreamPtr stream, ContentAudio content_audio, DCPTime time)
731 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Gain: copy-then-apply so the decoder's buffers are not mutated */
735 if (content->gain() != 0) {
736 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
737 gain->apply_gain (content->gain ());
738 content_audio.audio = gain;
/* Remap: accumulate each input channel into DCP channels per the mapping */
743 shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), content_audio.audio->frames()));
744 dcp_mapped->make_silent ();
746 AudioMapping map = stream->mapping ();
747 for (int i = 0; i < map.input_channels(); ++i) {
748 for (int j = 0; j < dcp_mapped->channels(); ++j) {
749 if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
750 dcp_mapped->accumulate_channel (
751 content_audio.audio.get(),
753 static_cast<dcp::Channel> (j),
754 map.get (i, static_cast<dcp::Channel> (j))
760 content_audio.audio = dcp_mapped;
/* Optional film-wide audio processor (e.g. upmixer) */
764 if (_audio_processor) {
765 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
770 _audio_merger.push (content_audio.audio, time);
771 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
772 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for decoded audio from a piece: resample if needed, compute the
   block's DCP time, trim anything outside the content's period, then hand
   off to audio_transform(). */
776 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
778 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
780 shared_ptr<Piece> piece = wp.lock ();
785 shared_ptr<AudioContent> content = piece->content->audio;
786 DCPOMATIC_ASSERT (content);
/* Resample to the content's target rate if the stream differs */
789 if (stream->frame_rate() != content->resampled_frame_rate()) {
790 shared_ptr<Resampler> r = resampler (content, stream, true);
791 pair<shared_ptr<const AudioBuffers>, Frame> ro = r->run (content_audio.audio, content_audio.frame);
792 if (ro.first->frames() == 0) {
795 content_audio.audio = ro.first;
796 content_audio.frame = ro.second;
799 /* Compute time in the DCP */
800 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
801 /* And the end of this block in the DCP */
802 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
804 /* Remove anything that comes before the start or after the end of the content */
805 if (time < piece->content->position()) {
806 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
808 /* This audio is entirely discarded */
811 content_audio.audio = cut.first;
813 } else if (time > piece->content->end()) {
/* Starts after the content has finished: drop it */
816 } else if (end > piece->content->end()) {
/* Overlaps the end: keep only the frames up to content->end() */
817 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
818 DCPOMATIC_ASSERT (remaining_frames > 0);
819 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
820 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
821 content_audio.audio = cut;
824 audio_transform (content, stream, content_audio, time);
/* Handler for decoded image subtitles: apply the content's offset/scale
   settings, then either queue for burn-in or emit via the Subtitle signal. */
828 Player::image_subtitle (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
830 shared_ptr<Piece> piece = wp.lock ();
835 /* Apply content's subtitle offsets */
836 subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
837 subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
839 /* Apply content's subtitle scale */
840 subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
841 subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
843 /* Apply a corrective translation to keep the subtitle centred after that scale */
844 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
845 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
848 ps.image.push_back (subtitle.sub);
/* Map the subtitle's content-time period into DCP time */
849 DCPTimePeriod period (content_time_to_dcp (piece, subtitle.period().from), content_time_to_dcp (piece, subtitle.period().to));
/* Burn-in path stores for video(); otherwise emit to listeners now */
851 if (piece->content->subtitle->use() && (piece->content->subtitle->burn() || _always_burn_subtitles)) {
852 _subtitles.push_back (make_pair (ps, period));
854 Subtitle (ps, period);
/* Handler for decoded text subtitles: apply content offsets/scales to each
   SubtitleString, set in/out times, then queue for burn-in or emit. */
859 Player::text_subtitle (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
861 shared_ptr<Piece> piece = wp.lock ();
867 DCPTimePeriod const period (content_time_to_dcp (piece, subtitle.period().from), content_time_to_dcp (piece, subtitle.period().to));
869 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
870 s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
871 s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
872 float const xs = piece->content->subtitle->x_scale();
873 float const ys = piece->content->subtitle->y_scale();
874 float size = s.size();
876 /* Adjust size to express the common part of the scaling;
877 e.g. if xs = ys = 0.5 we scale size by 2.
/* Guard against division by (near-)zero scales */
879 if (xs > 1e-5 && ys > 1e-5) {
880 size *= 1 / min (1 / xs, 1 / ys);
884 /* Then express aspect ratio changes */
885 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
886 s.set_aspect_adjust (xs / ys);
/* dcp::Time at millisecond resolution */
889 s.set_in (dcp::Time(period.from.seconds(), 1000));
890 s.set_out (dcp::Time(period.to.seconds(), 1000));
891 ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
892 ps.add_fonts (piece->content->subtitle->fonts ());
/* Burn-in path stores for video(); otherwise emit to listeners now */
895 if (piece->content->subtitle->use() && (piece->content->subtitle->burn() || _always_burn_subtitles)) {
896 _subtitles.push_back (make_pair (ps, period));
898 Subtitle (ps, period);
/* Seek to a DCP time.  Flushes processors/resamplers and the merger, seeks
   every decoder whose content covers the target time, and resets the
   last-emitted video/audio clocks (only set when the seek is `accurate'). */
903 Player::seek (DCPTime time, bool accurate)
905 if (_audio_processor) {
906 _audio_processor->flush ();
/* Drop any samples buffered in the resamplers */
909 for (ResamplerMap::iterator i = _resamplers.begin(); i != _resamplers.end(); ++i) {
914 _audio_merger.clear ();
916 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Only seek decoders whose content actually spans `time' */
918 if (i->content->position() <= time && time < i->content->end()) {
919 i->decoder->seek (dcp_to_content_time (i, time), accurate);
/* Accurate seek: prime the clocks so the first emitted frame lands at `time' */
924 _last_video_time = time - one_video_frame ();
925 _last_audio_time = time;
927 _last_video_time = optional<DCPTime> ();
928 _last_audio_time = optional<DCPTime> ();
/* Find (or, when `create' is true, create and cache) the Resampler for a
   (content, stream) pair; returns null when absent and create is false. */
932 shared_ptr<Resampler>
933 Player::resampler (shared_ptr<const AudioContent> content, AudioStreamPtr stream, bool create)
935 ResamplerMap::const_iterator i = _resamplers.find (make_pair (content, stream));
936 if (i != _resamplers.end ()) {
941 return shared_ptr<Resampler> ();
945 "Creating new resampler from %1 to %2 with %3 channels",
946 stream->frame_rate(),
947 content->resampled_frame_rate(),
951 shared_ptr<Resampler> r (
952 new Resampler (stream->frame_rate(), content->resampled_frame_rate(), stream->channels())
/* Cache for subsequent calls */
955 _resamplers[make_pair(content, stream)] = r;
/* Emit video for every frame in `period' that has none: repeat the last
   frame where video content exists, otherwise emit black.  Periods covered
   by referenced DCP video (_no_video) are skipped. */
960 Player::fill_video (DCPTimePeriod period)
962 /* XXX: this may not work for 3D */
963 BOOST_FOREACH (DCPTimePeriod i, subtract(period, _no_video)) {
964 for (DCPTime j = i.from; j < i.to; j += one_video_frame()) {
965 if (_playlist->video_content_at(j) && _last_video) {
966 Video (shared_ptr<PlayerVideo> (new PlayerVideo (*_last_video)), j);
968 Video (black_player_video_frame(), j);
/* Emit silence over `period', excluding stretches covered by referenced DCP
   audio (_no_audio), in blocks of at most half a second. */
975 Player::fill_audio (DCPTimePeriod period)
977 BOOST_FOREACH (DCPTimePeriod i, subtract(period, _no_audio)) {
980 DCPTime block = min (DCPTime::from_seconds (0.5), i.to - t);
981 Frame const samples = block.frames_round(_film->audio_frame_rate());
983 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
984 silence->make_silent ();
/* The duration of a single video frame at the film's video frame rate. */
993 Player::one_video_frame () const
995 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Drop the samples of `audio' (which starts at `time') that fall before
   `discard_to'; returns the remaining buffer and its new start time, or a
   null buffer when everything was discarded. */
998 pair<shared_ptr<AudioBuffers>, DCPTime>
999 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1001 DCPTime const discard_time = discard_to - time;
1002 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1003 Frame remaining_frames = audio->frames() - discard_frames;
1004 if (remaining_frames <= 0) {
/* Entire buffer lies before discard_to */
1005 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1007 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1008 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1009 return make_pair(cut, time + discard_time);