2 Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_subtitles.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <boost/foreach.hpp>
/* Convenience macro: write a TYPE_GENERAL entry, formatted with String::compose, to the Film's log. */
62 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
/* Construct a Player for the given Film and Playlist.  Wires up change signals so
   that our piece list can be rebuilt when anything relevant changes, then does an
   accurate seek to time zero so the player is ready to run.
   NOTE(review): the head of the initialiser list (presumably `_film (film)`) and the
   function braces fall on lines missing from this extract — confirm against the
   original source. */
80 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
82 , _playlist (playlist)
83 , _have_valid_pieces (false)
84 , _ignore_video (false)
85 , _ignore_audio (false)
86 , _always_burn_subtitles (false)
88 , _play_referenced (false)
89 , _audio_merger (_film->audio_frame_rate())
/* Invalidate/rebuild state when the film, the playlist or a piece of content changes */
91 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
92 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
93 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
94 set_video_container_size (_film->frame_size ());
/* Pick up any audio processor configured on the film (clones it for our sample rate) */
96 film_changed (Film::AUDIO_PROCESSOR);
/* Accurate seek to the start */
98 seek (DCPTime (), true);
/* Rebuild _pieces from the playlist: create a decoder for each piece of content,
   connect its output signals to our handlers, record per-stream audio state and
   note the periods covered by referenced (skipped) DCP video/audio.
   NOTE(review): the return type, braces and several guard/`continue` lines are
   missing from this extract; only comments have been added here. */
102 Player::setup_pieces ()
106 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Content whose files have gone missing cannot be decoded */
108 if (!i->paths_valid ()) {
112 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log());
113 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
116 /* Not something that we can decode; e.g. Atmos content */
/* Honour the ignore flags set by set_ignore_video() etc. */
120 if (decoder->video && _ignore_video) {
121 decoder->video->set_ignore ();
124 if (decoder->audio && _ignore_audio) {
125 decoder->audio->set_ignore ();
/* DCP decoders may need to decode even their referenced assets */
128 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
129 if (dcp && _play_referenced) {
130 dcp->set_decode_referenced ();
133 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
134 _pieces.push_back (piece);
/* Route decoder output to our handlers, passing a weak_ptr so a dead Piece is detectable */
136 if (decoder->video) {
137 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
140 if (decoder->audio) {
141 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
144 if (decoder->subtitle) {
145 decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
146 decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
147 decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
/* Seed per-stream bookkeeping used by pass() to decide how far audio can be pulled */
151 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
152 if (i->content->audio) {
153 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
154 _stream_states[j] = StreamState (i, i->content->position ());
/* When not playing referenced assets, record the periods in which we must not
   fill black/silence because a referenced DCP supplies them */
159 if (!_play_referenced) {
160 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
161 shared_ptr<DCPContent> dc = dynamic_pointer_cast<DCPContent> (i->content);
163 if (dc->reference_video()) {
164 _no_video.push_back (DCPTimePeriod (dc->position(), dc->end()));
166 if (dc->reference_audio()) {
167 _no_audio.push_back (DCPTimePeriod (dc->position(), dc->end()));
/* Reset emission clocks and mark the piece list valid */
173 _last_video_time = DCPTime ();
174 _last_audio_time = DCPTime ();
175 _have_valid_pieces = true;
/* React to a change in a property of one piece of content.  The first group of
   properties affects how pieces are built, so invalidates _pieces; the second
   group only changes the appearance of our output.
   NOTE(review): the null-check on the locked weak_ptr, the if/else-if skeleton and
   whatever signal is emitted for the second group are on lines missing from this
   extract — confirm against the original source. */
179 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
181 shared_ptr<Content> c = w.lock ();
/* Properties which require the pieces (decoders etc.) to be rebuilt */
187 property == ContentProperty::POSITION ||
188 property == ContentProperty::LENGTH ||
189 property == ContentProperty::TRIM_START ||
190 property == ContentProperty::TRIM_END ||
191 property == ContentProperty::PATH ||
192 property == VideoContentProperty::FRAME_TYPE ||
193 property == DCPContentProperty::NEEDS_ASSETS ||
194 property == DCPContentProperty::NEEDS_KDM ||
195 property == SubtitleContentProperty::COLOUR ||
196 property == SubtitleContentProperty::OUTLINE ||
197 property == SubtitleContentProperty::SHADOW ||
198 property == SubtitleContentProperty::EFFECT_COLOUR ||
199 property == FFmpegContentProperty::SUBTITLE_STREAM ||
200 property == VideoContentProperty::COLOUR_CONVERSION
203 _have_valid_pieces = false;
/* Properties which change our output without needing new pieces */
207 property == SubtitleContentProperty::LINE_SPACING ||
208 property == SubtitleContentProperty::OUTLINE_WIDTH ||
209 property == SubtitleContentProperty::Y_SCALE ||
210 property == SubtitleContentProperty::FADE_IN ||
211 property == SubtitleContentProperty::FADE_OUT ||
212 property == ContentProperty::VIDEO_FRAME_RATE ||
213 property == SubtitleContentProperty::USE ||
214 property == SubtitleContentProperty::X_OFFSET ||
215 property == SubtitleContentProperty::Y_OFFSET ||
216 property == SubtitleContentProperty::X_SCALE ||
217 property == SubtitleContentProperty::FONTS ||
218 property == VideoContentProperty::CROP ||
219 property == VideoContentProperty::SCALE ||
220 property == VideoContentProperty::FADE_IN ||
221 property == VideoContentProperty::FADE_OUT
/* Set the size of the container that video will be put in, and rebuild the cached
   black frame to match.  No-op if the size is unchanged.
   NOTE(review): braces and any trailing signal emission are on lines missing from
   this extract. */
229 Player::set_video_container_size (dcp::Size s)
231 if (s == _video_container_size) {
235 _video_container_size = s;
/* Cache an all-black RGB frame at the new size for gap filling */
237 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
238 _black_image->make_black ();
/* Invalidate pieces when the playlist itself changes.
   NOTE(review): the body may continue on lines missing from this extract. */
244 Player::playlist_changed ()
246 _have_valid_pieces = false;
/* React to a change in a Film property that affects our output. */
251 Player::film_changed (Film::Property p)
253 /* Here we should notice Film properties that affect our output, and
254 alert listeners that our output now would be different to how it was
255 last time we were run.
258 if (p == Film::CONTAINER) {
260 } else if (p == Film::VIDEO_FRAME_RATE) {
261 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
262 so we need new pieces here.
264 _have_valid_pieces = false;
266 } else if (p == Film::AUDIO_PROCESSOR) {
267 if (_film->audio_processor ()) {
/* Clone the processor so it runs at our audio sample rate */
268 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
/* Scale and position image subtitles so that they can be composited onto a frame
   of _video_container_size, returning the resulting PositionImages.
   NOTE(review): the scaling call itself and the push_back into `all' are on lines
   missing from this extract; only comments have been added. */
274 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
276 list<PositionImage> all;
278 for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
283 /* We will scale the subtitle up to fit _video_container_size */
284 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
286 /* Then we need a corrective translation, consisting of two parts:
288 * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
289 * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
291 * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
292 * (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
293 * (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
295 * Combining these two translations gives these expressions.
302 dcp::YUV_TO_RGB_REC601,
303 i->image->pixel_format (),
/* Position is the scaled rectangle origin, rounded to integer pixels */
308 lrint (_video_container_size.width * i->rectangle.x),
309 lrint (_video_container_size.height * i->rectangle.y)
/* Make a PlayerVideo wrapping our cached black image, used to fill gaps where no
   content supplies video.
   NOTE(review): several PlayerVideo constructor arguments (crop, eyes, part etc.)
   are on lines missing from this extract. */
318 shared_ptr<PlayerVideo>
319 Player::black_player_video_frame () const
321 return shared_ptr<PlayerVideo> (
323 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
326 _video_container_size,
327 _video_container_size,
/* Any preset colour conversion will do for an all-black frame */
330 PresetColourConversion::all().front().conversion
/* --- Time conversion helpers ---
   These map between DCP-timeline times/frames and content-local times/frames for
   one Piece, accounting for the piece's position, trims and frame-rate change.
   NOTE(review): return types and braces fall on lines missing from this extract. */

/* DCP time -> content video frame index */
336 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
338 DCPTime s = t - piece->content->position ();
339 s = min (piece->content->length_after_trim(), s);
340 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
342 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
343 then convert that ContentTime to frames at the content's rate. However this fails for
344 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
345 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
347 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
349 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();

/* Content video frame index -> DCP time */
353 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
355 /* See comment in dcp_to_content_video */
356 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
357 return max (DCPTime (), d + piece->content->position ());

/* DCP time -> frame count at the film's audio sample rate */
361 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
363 DCPTime s = t - piece->content->position ();
364 s = min (piece->content->length_after_trim(), s);
365 /* See notes in dcp_to_content_video */
366 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());

/* Audio frame (at the film's rate) -> DCP time */
370 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
372 /* See comment in dcp_to_content_video */
373 DCPTime const d = DCPTime::from_frames (f, _film->audio_frame_rate()) - DCPTime (piece->content->trim_start(), piece->frc);
374 return max (DCPTime (), d + piece->content->position ());

/* DCP time -> content-local ContentTime */
378 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
380 DCPTime s = t - piece->content->position ();
381 s = min (piece->content->length_after_trim(), s);
382 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());

/* Content-local ContentTime -> DCP time */
386 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
388 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by all subtitle content in the playlist, rebuilding the
   piece list first if it is stale.
   NOTE(review): the setup_pieces() call and the return statement are on lines
   missing from this extract. */
391 list<shared_ptr<Font> >
392 Player::get_subtitle_fonts ()
394 if (!_have_valid_pieces) {
398 list<shared_ptr<Font> > fonts;
399 BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
400 if (p->content->subtitle) {
401 /* XXX: things may go wrong if there are duplicate font IDs
402 with different font files.
404 list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
405 copy (f.begin(), f.end(), back_inserter (fonts));
412 /** Set this player never to produce any video data */
414 Player::set_ignore_video ()
416 _ignore_video = true;
/** Set whether or not this player should always burn text subtitles into the image,
 * regardless of the content settings.
 * @param burn true to always burn subtitles, false to obey content settings.
 */
424 Player::set_always_burn_subtitles (bool burn)
426 _always_burn_subtitles = burn;
/* NOTE(review): the line below belongs to a following setter whose signature was
   lost from this extract (original lines 427-432); it invalidates the piece list
   so the new setting takes effect. */
433 _have_valid_pieces = false;
437 Player::set_play_referenced ()
439 _play_referenced = true;
440 _have_valid_pieces = false;
/* Collect the reel assets (picture/sound/subtitle) from any DCP content that is
   marked as referenced, trimmed to the content's trim settings and positioned on
   the film timeline.
   NOTE(review): several lines (the `continue' for non-DCP content, the try/catch
   around DCPDecoder construction, the offset declaration, push_back wrappers and
   the final return) are missing from this extract. */
443 list<ReferencedReelAsset>
444 Player::get_reel_assets ()
446 list<ReferencedReelAsset> a;
448 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can be referenced */
449 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
454 scoped_ptr<DCPDecoder> decoder;
456 decoder.reset (new DCPDecoder (j, _film->log()));
462 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
/* Work out trims in frames at the content's own rate, and the film rate for durations */
464 DCPOMATIC_ASSERT (j->video_frame_rate ());
465 double const cfr = j->video_frame_rate().get();
466 Frame const trim_start = j->trim_start().frames_round (cfr);
467 Frame const trim_end = j->trim_end().frames_round (cfr);
468 int const ffr = _film->video_frame_rate ();
470 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
/* For each referenced asset type, apply the trims to entry point/duration and record it */
471 if (j->reference_video ()) {
472 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
473 DCPOMATIC_ASSERT (ra);
474 ra->set_entry_point (ra->entry_point() + trim_start);
475 ra->set_duration (ra->duration() - trim_start - trim_end);
477 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
481 if (j->reference_audio ()) {
482 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
483 DCPOMATIC_ASSERT (ra);
484 ra->set_entry_point (ra->entry_point() + trim_start);
485 ra->set_duration (ra->duration() - trim_start - trim_end);
487 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
491 if (j->reference_subtitle ()) {
492 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
493 DCPOMATIC_ASSERT (ra);
494 ra->set_entry_point (ra->entry_point() + trim_start);
495 ra->set_duration (ra->duration() - trim_start - trim_end);
497 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
501 /* Assume that main picture duration is the length of the reel */
502 offset += k->main_picture()->duration ();
/* One step of the player: rebuild pieces if stale, find the piece whose decoder is
   earliest on the DCP timeline, fill any black/silence gap before it, pass its
   decoder, then pull and emit whatever merged audio is now complete.
   NOTE(review): the function signature (presumably `bool Player::pass ()'), the
   `filled' bookkeeping, setup_pieces() call and many braces are on lines missing
   from this extract; only comments have been added. */
512 if (!_have_valid_pieces) {
/* Find the piece whose decoder position maps to the earliest DCP time */
516 shared_ptr<Piece> earliest;
517 DCPTime earliest_content;
519 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
521 DCPTime const t = content_time_to_dcp (i, i->decoder->position());
522 if (!earliest || t < earliest_content) {
523 earliest_content = t;
529 /* Fill towards the next thing that might happen (or the end of the playlist). This is to fill gaps between content,
530 NOT to fill gaps within content (the latter is done in ::video())
532 DCPTime fill_towards = earliest ? earliest_content : _playlist->length().ceil(_film->video_frame_rate());
534 /* Work out where to fill video from */
535 optional<DCPTime> video_fill_from;
536 if (_last_video_time) {
537 /* Fill from the last video or seek time */
538 video_fill_from = _last_video_time;
542 /* Fill some black if we would emit before the earliest piece of content. This is so we act like a phantom
543 Piece which emits black in spaces (we only emit if we are the earliest thing)
545 if (video_fill_from && (!earliest || *video_fill_from < earliest_content) && ((fill_towards - *video_fill_from)) >= one_video_frame()) {
546 list<DCPTimePeriod> p = subtract(DCPTimePeriod(*video_fill_from, *video_fill_from + one_video_frame()), _no_video);
548 emit_video (black_player_video_frame(), p.front().from);
551 } else if (_playlist->length() == DCPTime()) {
552 /* Special case of an empty Film; just give one black frame */
553 emit_video (black_player_video_frame(), DCPTime());
/* Work out where to fill audio from */
557 optional<DCPTime> audio_fill_from;
558 if (_last_audio_time) {
559 /* Fill from the last audio or seek time */
560 audio_fill_from = _last_audio_time;
/* Account for the earliest piece's audio delay when deciding how far to fill */
563 DCPTime audio_fill_towards = fill_towards;
564 if (earliest && earliest->content->audio) {
565 audio_fill_towards += DCPTime::from_seconds (earliest->content->audio->delay() / 1000.0);
568 if (audio_fill_from && audio_fill_from < audio_fill_towards && ((audio_fill_towards - *audio_fill_from) >= one_video_frame())) {
/* Cap the amount of silence filled per pass to one video frame's worth */
569 DCPTimePeriod period (*audio_fill_from, audio_fill_towards);
570 if (period.duration() > one_video_frame()) {
571 period.to = period.from + one_video_frame();
573 list<DCPTimePeriod> p = subtract(period, _no_audio);
575 fill_audio (p.front());
/* Let the earliest decoder produce some more data; `done' when it has no more */
581 earliest->done = earliest->decoder->pass ();
584 /* Emit any audio that is ready */
/* Audio can only be pulled up to the earliest point any live stream has pushed to */
586 DCPTime pull_to = _playlist->length ();
587 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
588 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
589 pull_to = i->second.last_push_end;
593 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
594 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
595 if (_last_audio_time && i->second < *_last_audio_time) {
596 /* There has been an accurate seek and we have received some audio before the seek time;
599 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
/* Plug any hole between the last emitted audio and this block with silence */
606 if (_last_audio_time) {
607 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
610 emit_audio (i->first, i->second);
/* Finished when no piece had anything left and nothing was filled */
613 return !earliest && !filled;
/* Gather all subtitles (image, plus text rendered to images) that should be burnt
   into the frame at `time', merged into a single PositionImage; empty optional if
   there are none. */
616 optional<PositionImage>
617 Player::subtitles_for_frame (DCPTime time) const
619 list<PositionImage> subtitles;
/* Only subtitles that are marked for burning (or forced by _always_burn_subtitles) */
621 BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
623 /* Image subtitles */
624 list<PositionImage> c = transform_image_subtitles (i.image);
625 copy (c.begin(), c.end(), back_inserter (subtitles));
627 /* Text subtitles (rendered to an image) */
628 if (!i.text.empty ()) {
629 list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
630 copy (s.begin(), s.end(), back_inserter (subtitles));
634 if (subtitles.empty ()) {
635 return optional<PositionImage> ();
638 return merge (subtitles);
/* Handle a frame of video arriving from a piece's decoder: drop skipped/out-of-range
   frames, fill any gap since the last emitted frame (repeating the piece's previous
   frame or using black), then wrap the frame in a PlayerVideo and emit it.
   NOTE(review): the return type, the guard on the locked weak_ptr, the PlayerVideo
   constructor head and several braces are on lines missing from this extract. */
642 Player::video (weak_ptr<Piece> wp, ContentVideo video)
644 shared_ptr<Piece> piece = wp.lock ();
/* With a skip frame-rate-change every other content frame is dropped */
649 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
650 if (frc.skip && (video.frame % 2) == 1) {
654 /* Time and period of the frame we will emit */
655 DCPTime const time = content_video_to_dcp (piece, video.frame);
656 DCPTimePeriod const period (time, time + one_video_frame());
658 /* Discard if it's outside the content's period or if it's before the last accurate seek */
660 time < piece->content->position() ||
661 time >= piece->content->end() ||
662 (_last_video_time && time < *_last_video_time)) {
666 /* Fill gaps that we discover now that we have some video which needs to be emitted */
668 optional<DCPTime> fill_to;
669 if (_last_video_time) {
670 fill_to = _last_video_time;
674 /* XXX: this may not work for 3D */
675 BOOST_FOREACH (DCPTimePeriod i, subtract(DCPTimePeriod (*fill_to, time), _no_video)) {
676 for (DCPTime j = i.from; j < i.to; j += one_video_frame()) {
/* Prefer repeating this piece's last frame; fall back to black */
677 LastVideoMap::const_iterator k = _last_video.find (wp);
678 if (k != _last_video.end ()) {
679 emit_video (k->second, j);
681 emit_video (black_player_video_frame(), j);
/* Build the PlayerVideo for this frame, remembering it for gap filling */
687 _last_video[wp].reset (
690 piece->content->video->crop (),
691 piece->content->video->fade (video.frame),
692 piece->content->video->scale().size (
693 piece->content->video, _video_container_size, _film->frame_size ()
695 _video_container_size,
698 piece->content->video->colour_conversion ()
702 emit_video (_last_video[wp], time);
/** Do our common processing on some audio: apply the content's gain, remap its
 *  channels to the film's channel layout, run any audio processor, then push the
 *  result into the merger and record how far this stream has been pushed.
 *  NOTE(review): braces and the comment headers between steps are on lines missing
 *  from this extract.
 */
707 Player::audio_transform (shared_ptr<AudioContent> content, AudioStreamPtr stream, ContentAudio content_audio, DCPTime time)
709 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Gain: copy the buffers before applying so the decoder's data is not mutated */
713 if (content->gain() != 0) {
714 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
715 gain->apply_gain (content->gain ());
716 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel count */
721 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
/* Process (e.g. upmix) if the film has an audio processor */
725 if (_audio_processor) {
726 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Push into the merger; last_push_end lets pass() know how far it can pull */
731 _audio_merger.push (content_audio.audio, time);
732 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
733 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handle a block of audio arriving from a piece's decoder: place it on the DCP
   timeline (including the content's audio delay), trim anything outside the
   content's period, then hand it to audio_transform().
   NOTE(review): the return type, the guard on the locked weak_ptr, the early
   returns and braces are on lines missing from this extract. */
737 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
739 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
741 shared_ptr<Piece> piece = wp.lock ();
746 shared_ptr<AudioContent> content = piece->content->audio;
747 DCPOMATIC_ASSERT (content);
749 /* Compute time in the DCP */
750 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
751 /* And the end of this block in the DCP */
752 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
754 /* Remove anything that comes before the start or after the end of the content */
755 if (time < piece->content->position()) {
756 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
758 /* This audio is entirely discarded */
761 content_audio.audio = cut.first;
763 } else if (time > piece->content->end()) {
/* Starts after the content ends: drop it entirely */
766 } else if (end > piece->content->end()) {
/* Overlaps the end of the content: keep only the frames that fit */
767 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
768 if (remaining_frames == 0) {
771 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
772 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
773 content_audio.audio = cut;
776 audio_transform (content, stream, content_audio, time);
/* Handle the start of an image subtitle from a piece's decoder: apply the content's
   offsets and scales, then register it with the active-subtitle tracker.
   NOTE(review): the return type, the guard on the locked weak_ptr, the
   PlayerSubtitles declaration and braces are on lines missing from this extract. */
780 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
782 shared_ptr<Piece> piece = wp.lock ();
787 /* Apply content's subtitle offsets */
788 subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
789 subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
791 /* Apply content's subtitle scale */
792 subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
793 subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
795 /* Apply a corrective translation to keep the subtitle centred after that scale */
796 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
797 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
800 ps.image.push_back (subtitle.sub);
/* Convert the subtitle's start into DCP time and track it until its Stop arrives */
801 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
803 _active_subtitles.add_from (wp, ps, from);
/* Handle the start of a text subtitle: apply the content's offsets and scales to
   each SubtitleString, then register the result with the active-subtitle tracker.
   NOTE(review): the return type, the guard on the locked weak_ptr, the
   PlayerSubtitles declaration and braces are on lines missing from this extract. */
807 Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
809 shared_ptr<Piece> piece = wp.lock ();
815 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
817 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
818 s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
819 s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
820 float const xs = piece->content->subtitle->x_scale();
821 float const ys = piece->content->subtitle->y_scale();
822 float size = s.size();
824 /* Adjust size to express the common part of the scaling;
825 e.g. if xs = ys = 0.5 we scale size by 2.
827 if (xs > 1e-5 && ys > 1e-5) {
828 size *= 1 / min (1 / xs, 1 / ys);
832 /* Then express aspect ratio changes */
833 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
834 s.set_aspect_adjust (xs / ys);
/* Stamp the DCP-timeline start time onto the subtitle */
837 s.set_in (dcp::Time(from.seconds(), 1000));
838 ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
839 ps.add_fonts (piece->content->subtitle->fonts ());
842 _active_subtitles.add_from (wp, ps, from);
/* Handle the end of a subtitle from a piece's decoder: close the active subtitle
   and, if it is not being burnt in, emit it via the Subtitle signal.
   NOTE(review): the return type, early returns and braces are on lines missing from
   this extract. */
846 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
848 if (!_active_subtitles.have (wp)) {
852 shared_ptr<Piece> piece = wp.lock ();
857 DCPTime const dcp_to = content_time_to_dcp (piece, to);
859 pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
/* Only emit subtitles which are in use and not burnt into the image */
861 if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
862 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
/* Seek the player to `time'.  If `accurate' is true, exact frame times are
   required and the emission clocks are set so early data can be discarded.
   NOTE(review): the return type, the `i->done' updates, the if (accurate) skeleton
   and braces are on lines missing from this extract. */
867 Player::seek (DCPTime time, bool accurate)
/* Throw away any part-processed audio */
869 if (_audio_processor) {
870 _audio_processor->flush ();
873 _audio_merger.clear ();
874 _active_subtitles.clear ();
876 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
877 if (time < i->content->position()) {
878 /* Before; seek to 0 */
879 i->decoder->seek (ContentTime(), accurate);
881 } else if (i->content->position() <= time && time < i->content->end()) {
882 /* During; seek to position */
883 i->decoder->seek (dcp_to_content_time (i, time), accurate);
886 /* After; this piece is done */
/* Accurate seeks fix the emission clocks at the seek time; otherwise leave them unset */
892 _last_video_time = time;
893 _last_audio_time = time;
895 _last_video_time = optional<DCPTime>();
896 _last_audio_time = optional<DCPTime>();
/* Forget remembered frames so gaps are not filled with pre-seek video */
899 _last_video.clear ();
/* Emit one frame of video: burn in any subtitles due at `time', fire the Video
   signal and advance the video clock.
   NOTE(review): the return type, the Video signal emission and braces are on lines
   missing from this extract. */
903 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
905 optional<PositionImage> subtitles = subtitles_for_frame (time);
907 pv->set_subtitle (subtitles.get ());
/* Only advance the clock once per 3D pair (or per 2D frame) */
912 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
913 _last_video_time = time + one_video_frame();
914 _active_subtitles.clear_before (time);
/* Emit a block of audio and advance the audio clock past it.
   NOTE(review): the return type, the Audio signal emission and braces are on lines
   missing from this extract. */
919 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
922 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence to cover `period', skipping any sub-periods supplied by referenced
   DCP audio (_no_audio).  Silence is emitted in blocks of at most 0.5s.
   NOTE(review): the return type, the inner while loop over `t' and braces are on
   lines missing from this extract. */
926 Player::fill_audio (DCPTimePeriod period)
928 if (period.from == period.to) {
932 DCPOMATIC_ASSERT (period.from < period.to);
934 BOOST_FOREACH (DCPTimePeriod i, subtract(period, _no_audio)) {
937 DCPTime block = min (DCPTime::from_seconds (0.5), i.to - t);
938 Frame const samples = block.frames_round(_film->audio_frame_rate());
/* A zero-sample block can occur at period edges; only emit when there is something */
940 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
941 silence->make_silent ();
942 emit_audio (silence, t);
950 Player::one_video_frame () const
952 return DCPTime::from_frames (1, _film->video_frame_rate ());
955 pair<shared_ptr<AudioBuffers>, DCPTime>
956 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
958 DCPTime const discard_time = discard_to - time;
959 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
960 Frame remaining_frames = audio->frames() - discard_frames;
961 if (remaining_frames <= 0) {
962 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
964 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
965 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
966 return make_pair(cut, time + discard_time);