/*
2 Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
*/
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_subtitles.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "resampler.h"
51 #include "compose.hpp"
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <boost/foreach.hpp>
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
81 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
83 , _playlist (playlist)
84 , _have_valid_pieces (false)
85 , _ignore_video (false)
86 , _ignore_audio (false)
87 , _always_burn_subtitles (false)
89 , _play_referenced (false)
90 , _audio_merger (_film->audio_frame_rate())
92 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
93 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
94 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
95 set_video_container_size (_film->frame_size ());
97 film_changed (Film::AUDIO_PROCESSOR);
99 seek (DCPTime (), true);
103 Player::setup_pieces ()
107 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
109 if (!i->paths_valid ()) {
113 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log());
114 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
117 /* Not something that we can decode; e.g. Atmos content */
121 if (decoder->video && _ignore_video) {
122 decoder->video->set_ignore ();
125 if (decoder->audio && _ignore_audio) {
126 decoder->audio->set_ignore ();
129 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
130 if (dcp && _play_referenced) {
131 dcp->set_decode_referenced ();
134 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
135 _pieces.push_back (piece);
137 if (decoder->video) {
138 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
141 if (decoder->audio) {
142 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
145 if (decoder->subtitle) {
146 decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
147 decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
148 decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
152 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
153 if (i->content->audio) {
154 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
155 _stream_states[j] = StreamState (i, i->content->position ());
160 if (!_play_referenced) {
161 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
162 shared_ptr<DCPContent> dc = dynamic_pointer_cast<DCPContent> (i->content);
164 if (dc->reference_video()) {
165 _no_video.push_back (DCPTimePeriod (dc->position(), dc->end()));
167 if (dc->reference_audio()) {
168 _no_audio.push_back (DCPTimePeriod (dc->position(), dc->end()));
174 _last_video_time = optional<DCPTime> ();
175 _last_audio_time = optional<DCPTime> ();
176 _have_valid_pieces = true;
180 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
182 shared_ptr<Content> c = w.lock ();
188 property == ContentProperty::POSITION ||
189 property == ContentProperty::LENGTH ||
190 property == ContentProperty::TRIM_START ||
191 property == ContentProperty::TRIM_END ||
192 property == ContentProperty::PATH ||
193 property == VideoContentProperty::FRAME_TYPE ||
194 property == DCPContentProperty::NEEDS_ASSETS ||
195 property == DCPContentProperty::NEEDS_KDM ||
196 property == SubtitleContentProperty::COLOUR ||
197 property == SubtitleContentProperty::OUTLINE ||
198 property == SubtitleContentProperty::SHADOW ||
199 property == SubtitleContentProperty::EFFECT_COLOUR ||
200 property == FFmpegContentProperty::SUBTITLE_STREAM ||
201 property == VideoContentProperty::COLOUR_CONVERSION
204 _have_valid_pieces = false;
208 property == SubtitleContentProperty::LINE_SPACING ||
209 property == SubtitleContentProperty::OUTLINE_WIDTH ||
210 property == SubtitleContentProperty::Y_SCALE ||
211 property == SubtitleContentProperty::FADE_IN ||
212 property == SubtitleContentProperty::FADE_OUT ||
213 property == ContentProperty::VIDEO_FRAME_RATE ||
214 property == SubtitleContentProperty::USE ||
215 property == SubtitleContentProperty::X_OFFSET ||
216 property == SubtitleContentProperty::Y_OFFSET ||
217 property == SubtitleContentProperty::X_SCALE ||
218 property == SubtitleContentProperty::FONTS ||
219 property == VideoContentProperty::CROP ||
220 property == VideoContentProperty::SCALE ||
221 property == VideoContentProperty::FADE_IN ||
222 property == VideoContentProperty::FADE_OUT
230 Player::set_video_container_size (dcp::Size s)
232 if (s == _video_container_size) {
236 _video_container_size = s;
238 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
239 _black_image->make_black ();
245 Player::playlist_changed ()
247 _have_valid_pieces = false;
252 Player::film_changed (Film::Property p)
254 /* Here we should notice Film properties that affect our output, and
255 alert listeners that our output now would be different to how it was
256 last time we were run.
259 if (p == Film::CONTAINER) {
261 } else if (p == Film::VIDEO_FRAME_RATE) {
262 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
263 so we need new pieces here.
265 _have_valid_pieces = false;
267 } else if (p == Film::AUDIO_PROCESSOR) {
268 if (_film->audio_processor ()) {
269 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
275 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
277 list<PositionImage> all;
279 for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
284 /* We will scale the subtitle up to fit _video_container_size */
285 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
287 /* Then we need a corrective translation, consisting of two parts:
289 * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
290 * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
292 * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
293 * (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
294 * (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
296 * Combining these two translations gives these expressions.
303 dcp::YUV_TO_RGB_REC601,
304 i->image->pixel_format (),
309 lrint (_video_container_size.width * i->rectangle.x),
310 lrint (_video_container_size.height * i->rectangle.y)
319 shared_ptr<PlayerVideo>
320 Player::black_player_video_frame () const
322 return shared_ptr<PlayerVideo> (
324 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
327 _video_container_size,
328 _video_container_size,
331 PresetColourConversion::all().front().conversion
337 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
339 DCPTime s = t - piece->content->position ();
340 s = min (piece->content->length_after_trim(), s);
341 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
343 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
344 then convert that ContentTime to frames at the content's rate. However this fails for
345 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
346 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
348 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
350 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
354 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
356 /* See comment in dcp_to_content_video */
357 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
358 return max (DCPTime (), d + piece->content->position ());
362 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
364 DCPTime s = t - piece->content->position ();
365 s = min (piece->content->length_after_trim(), s);
366 /* See notes in dcp_to_content_video */
367 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
371 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
373 /* See comment in dcp_to_content_video */
374 DCPTime const d = DCPTime::from_frames (f, _film->audio_frame_rate()) - DCPTime (piece->content->trim_start(), piece->frc);
375 return max (DCPTime (), d + piece->content->position ());
379 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
381 DCPTime s = t - piece->content->position ();
382 s = min (piece->content->length_after_trim(), s);
383 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
387 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
389 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
392 list<shared_ptr<Font> >
393 Player::get_subtitle_fonts ()
395 if (!_have_valid_pieces) {
399 list<shared_ptr<Font> > fonts;
400 BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
401 if (p->content->subtitle) {
402 /* XXX: things may go wrong if there are duplicate font IDs
403 with different font files.
405 list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
406 copy (f.begin(), f.end(), back_inserter (fonts));
413 /** Set this player never to produce any video data */
415 Player::set_ignore_video ()
417 _ignore_video = true;
420 /** Set this player never to produce any audio data */
422 Player::set_ignore_audio ()
424 _ignore_audio = true;
427 /** Set whether or not this player should always burn text subtitles into the image,
428 * regardless of the content settings.
429 * @param burn true to always burn subtitles, false to obey content settings.
432 Player::set_always_burn_subtitles (bool burn)
434 _always_burn_subtitles = burn;
441 _have_valid_pieces = false;
445 Player::set_play_referenced ()
447 _play_referenced = true;
448 _have_valid_pieces = false;
451 list<ReferencedReelAsset>
452 Player::get_reel_assets ()
454 list<ReferencedReelAsset> a;
456 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
457 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
462 scoped_ptr<DCPDecoder> decoder;
464 decoder.reset (new DCPDecoder (j, _film->log()));
470 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
472 DCPOMATIC_ASSERT (j->video_frame_rate ());
473 double const cfr = j->video_frame_rate().get();
474 Frame const trim_start = j->trim_start().frames_round (cfr);
475 Frame const trim_end = j->trim_end().frames_round (cfr);
476 int const ffr = _film->video_frame_rate ();
478 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
479 if (j->reference_video ()) {
480 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
481 DCPOMATIC_ASSERT (ra);
482 ra->set_entry_point (ra->entry_point() + trim_start);
483 ra->set_duration (ra->duration() - trim_start - trim_end);
485 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
489 if (j->reference_audio ()) {
490 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
491 DCPOMATIC_ASSERT (ra);
492 ra->set_entry_point (ra->entry_point() + trim_start);
493 ra->set_duration (ra->duration() - trim_start - trim_end);
495 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
499 if (j->reference_subtitle ()) {
500 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
501 DCPOMATIC_ASSERT (ra);
502 ra->set_entry_point (ra->entry_point() + trim_start);
503 ra->set_duration (ra->duration() - trim_start - trim_end);
505 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
509 /* Assume that main picture duration is the length of the reel */
510 offset += k->main_picture()->duration ();
517 list<shared_ptr<Piece> >
518 Player::overlaps (DCPTime from, DCPTime to, boost::function<bool (Content *)> valid)
520 if (!_have_valid_pieces) {
524 list<shared_ptr<Piece> > overlaps;
525 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
526 if (valid (i->content.get ()) && i->content->position() < to && i->content->end() > from) {
527 overlaps.push_back (i);
537 if (!_have_valid_pieces) {
541 shared_ptr<Piece> earliest;
542 DCPTime earliest_content;
544 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
546 DCPTime const t = i->content->position() + DCPTime (i->decoder->position(), i->frc);
547 if (!earliest || t < earliest_content) {
548 earliest_content = t;
555 /* No more content; fill up with silent black */
556 DCPTimePeriod remaining_video (DCPTime(), _playlist->length());
557 if (_last_video_time) {
558 remaining_video.from = _last_video_time.get();
560 fill_video (remaining_video);
561 DCPTimePeriod remaining_audio (DCPTime(), _playlist->length());
562 if (_last_audio_time) {
563 remaining_audio.from = _last_audio_time.get();
565 fill_audio (remaining_audio);
569 earliest->done = earliest->decoder->pass ();
570 if (earliest->done && earliest->content->audio) {
571 /* Flush the Player audio system for this piece */
572 BOOST_FOREACH (AudioStreamPtr i, earliest->content->audio->streams()) {
573 audio_flush (earliest, i);
577 /* Emit any audio that is ready */
579 DCPTime pull_from = _playlist->length ();
580 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
581 if (!i->second.piece->done && i->second.last_push_end < pull_from) {
582 pull_from = i->second.last_push_end;
586 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_from);
587 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
588 if (_last_audio_time && i->second < _last_audio_time.get()) {
589 /* There has been an accurate seek and we have received some audio before the seek time;
592 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
599 if (_last_audio_time) {
600 fill_audio (DCPTimePeriod (_last_audio_time.get(), i->second));
603 Audio (i->first, i->second);
604 _last_audio_time = i->second + DCPTime::from_frames(i->first->frames(), _film->audio_frame_rate());
611 Player::video (weak_ptr<Piece> wp, ContentVideo video)
613 shared_ptr<Piece> piece = wp.lock ();
618 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
619 if (frc.skip && (video.frame % 2) == 1) {
623 /* Time and period of the frame we will emit */
624 DCPTime const time = content_video_to_dcp (piece, video.frame);
625 DCPTimePeriod const period (time, time + one_video_frame());
627 /* Discard if it's outside the content's period or if it's before the last accurate seek */
628 if (time < piece->content->position() || time >= piece->content->end() || (_last_video_time && time < _last_video_time)) {
632 /* Get any subtitles */
634 optional<PositionImage> subtitles;
636 for (ActiveSubtitles::const_iterator i = _active_subtitles.begin(); i != _active_subtitles.end(); ++i) {
638 shared_ptr<Piece> sub_piece = i->first.lock ();
643 if (!sub_piece->content->subtitle->use() || (!_always_burn_subtitles && !piece->content->subtitle->burn())) {
647 pair<PlayerSubtitles, DCPTime> sub = i->second;
649 list<PositionImage> sub_images;
651 /* Image subtitles */
652 list<PositionImage> c = transform_image_subtitles (sub.first.image);
653 copy (c.begin(), c.end(), back_inserter (sub_images));
655 /* Text subtitles (rendered to an image) */
656 if (!sub.first.text.empty ()) {
657 list<PositionImage> s = render_subtitles (sub.first.text, sub.first.fonts, _video_container_size, time);
658 copy (s.begin (), s.end (), back_inserter (sub_images));
661 if (!sub_images.empty ()) {
662 subtitles = merge (sub_images);
668 if (_last_video_time) {
669 fill_video (DCPTimePeriod (_last_video_time.get(), time));
675 piece->content->video->crop (),
676 piece->content->video->fade (video.frame),
677 piece->content->video->scale().size (
678 piece->content->video, _video_container_size, _film->frame_size ()
680 _video_container_size,
683 piece->content->video->colour_conversion ()
688 _last_video->set_subtitle (subtitles.get ());
691 Video (_last_video, time);
693 _last_video_time = time + one_video_frame ();
697 Player::audio_flush (shared_ptr<Piece> piece, AudioStreamPtr stream)
699 shared_ptr<AudioContent> content = piece->content->audio;
700 DCPOMATIC_ASSERT (content);
702 shared_ptr<Resampler> r = resampler (content, stream, false);
707 pair<shared_ptr<const AudioBuffers>, Frame> ro = r->flush ();
708 if (ro.first->frames() == 0) {
712 ContentAudio content_audio;
713 content_audio.audio = ro.first;
714 content_audio.frame = ro.second;
716 /* Compute time in the DCP */
717 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
719 audio_transform (content, stream, content_audio, time);
722 /** Do our common processing on some audio */
724 Player::audio_transform (shared_ptr<AudioContent> content, AudioStreamPtr stream, ContentAudio content_audio, DCPTime time)
726 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
730 if (content->gain() != 0) {
731 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
732 gain->apply_gain (content->gain ());
733 content_audio.audio = gain;
738 shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), content_audio.audio->frames()));
739 dcp_mapped->make_silent ();
741 AudioMapping map = stream->mapping ();
742 for (int i = 0; i < map.input_channels(); ++i) {
743 for (int j = 0; j < dcp_mapped->channels(); ++j) {
744 if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
745 dcp_mapped->accumulate_channel (
746 content_audio.audio.get(),
748 static_cast<dcp::Channel> (j),
749 map.get (i, static_cast<dcp::Channel> (j))
755 content_audio.audio = dcp_mapped;
759 if (_audio_processor) {
760 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
765 _audio_merger.push (content_audio.audio, time);
766 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
767 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
771 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
773 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
775 shared_ptr<Piece> piece = wp.lock ();
780 shared_ptr<AudioContent> content = piece->content->audio;
781 DCPOMATIC_ASSERT (content);
784 if (stream->frame_rate() != content->resampled_frame_rate()) {
785 shared_ptr<Resampler> r = resampler (content, stream, true);
786 pair<shared_ptr<const AudioBuffers>, Frame> ro = r->run (content_audio.audio, content_audio.frame);
787 if (ro.first->frames() == 0) {
790 content_audio.audio = ro.first;
791 content_audio.frame = ro.second;
794 /* Compute time in the DCP */
795 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
796 /* And the end of this block in the DCP */
797 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
799 /* Remove anything that comes before the start or after the end of the content */
800 if (time < piece->content->position()) {
801 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
803 /* This audio is entirely discarded */
806 content_audio.audio = cut.first;
808 } else if (time > piece->content->end()) {
811 } else if (end > piece->content->end()) {
812 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
813 DCPOMATIC_ASSERT (remaining_frames > 0);
814 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
815 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
816 content_audio.audio = cut;
819 audio_transform (content, stream, content_audio, time);
823 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
825 shared_ptr<Piece> piece = wp.lock ();
830 /* Apply content's subtitle offsets */
831 subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
832 subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
834 /* Apply content's subtitle scale */
835 subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
836 subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
838 /* Apply a corrective translation to keep the subtitle centred after that scale */
839 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
840 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
843 ps.image.push_back (subtitle.sub);
844 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
846 _active_subtitles[wp] = make_pair (ps, from);
850 Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
852 shared_ptr<Piece> piece = wp.lock ();
858 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
860 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
861 s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
862 s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
863 float const xs = piece->content->subtitle->x_scale();
864 float const ys = piece->content->subtitle->y_scale();
865 float size = s.size();
867 /* Adjust size to express the common part of the scaling;
868 e.g. if xs = ys = 0.5 we scale size by 2.
870 if (xs > 1e-5 && ys > 1e-5) {
871 size *= 1 / min (1 / xs, 1 / ys);
875 /* Then express aspect ratio changes */
876 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
877 s.set_aspect_adjust (xs / ys);
880 s.set_in (dcp::Time(from.seconds(), 1000));
881 ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
882 ps.add_fonts (piece->content->subtitle->fonts ());
885 _active_subtitles[wp] = make_pair (ps, from);
889 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
891 if (_active_subtitles.find (wp) == _active_subtitles.end ()) {
895 shared_ptr<Piece> piece = wp.lock ();
900 if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
901 Subtitle (_active_subtitles[wp].first, DCPTimePeriod (_active_subtitles[wp].second, content_time_to_dcp (piece, to)));
904 _active_subtitles.erase (wp);
908 Player::seek (DCPTime time, bool accurate)
910 if (_audio_processor) {
911 _audio_processor->flush ();
914 for (ResamplerMap::iterator i = _resamplers.begin(); i != _resamplers.end(); ++i) {
919 _audio_merger.clear ();
920 _active_subtitles.clear ();
922 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
924 DCPTime const t = min(max(time, i->content->position()), i->content->end());
925 i->decoder->seek (dcp_to_content_time (i, t), accurate);
929 _last_video_time = time;
930 _last_audio_time = time;
932 _last_video_time = optional<DCPTime> ();
933 _last_audio_time = optional<DCPTime> ();
937 shared_ptr<Resampler>
938 Player::resampler (shared_ptr<const AudioContent> content, AudioStreamPtr stream, bool create)
940 ResamplerMap::const_iterator i = _resamplers.find (make_pair (content, stream));
941 if (i != _resamplers.end ()) {
946 return shared_ptr<Resampler> ();
950 "Creating new resampler from %1 to %2 with %3 channels",
951 stream->frame_rate(),
952 content->resampled_frame_rate(),
956 shared_ptr<Resampler> r (
957 new Resampler (stream->frame_rate(), content->resampled_frame_rate(), stream->channels())
960 _resamplers[make_pair(content, stream)] = r;
965 Player::fill_video (DCPTimePeriod period)
967 /* XXX: this may not work for 3D */
968 BOOST_FOREACH (DCPTimePeriod i, subtract(period, _no_video)) {
969 for (DCPTime j = i.from; j < i.to; j += one_video_frame()) {
970 if (_playlist->video_content_at(j) && _last_video) {
971 Video (shared_ptr<PlayerVideo> (new PlayerVideo (*_last_video)), j);
973 Video (black_player_video_frame(), j);
980 Player::fill_audio (DCPTimePeriod period)
982 BOOST_FOREACH (DCPTimePeriod i, subtract(period, _no_audio)) {
985 DCPTime block = min (DCPTime::from_seconds (0.5), i.to - t);
986 Frame const samples = block.frames_round(_film->audio_frame_rate());
988 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
989 silence->make_silent ();
998 Player::one_video_frame () const
1000 return DCPTime::from_frames (1, _film->video_frame_rate ());
1003 pair<shared_ptr<AudioBuffers>, DCPTime>
1004 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1006 DCPTime const discard_time = discard_to - time;
1007 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1008 Frame remaining_frames = audio->frames() - discard_frames;
1009 if (remaining_frames <= 0) {
1010 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1012 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1013 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1014 return make_pair(cut, time + discard_time);