2 Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_subtitles.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "resampler.h"
51 #include "compose.hpp"
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <boost/foreach.hpp>
/* Convenience macro: write a String::compose()d message to the Film's log as a GENERAL entry. */
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
/** Construct a Player for a given Film and Playlist.
 *  Connects change signals from the film and playlist, sizes the video container
 *  to the film's frame size, picks up any audio processor and seeks to time zero.
 *  @param film Film to play.
 *  @param playlist Playlist providing the content.
 */
81 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
83 	, _playlist (playlist)
84 	, _have_valid_pieces (false)
85 	, _ignore_video (false)
86 	, _ignore_audio (false)
87 	, _always_burn_subtitles (false)
89 	, _play_referenced (false)
90 	, _audio_merger (_film->audio_frame_rate())
/* Watch for changes which require us to rebuild pieces or re-emit output */
92 	_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
93 	_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
94 	_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
95 	set_video_container_size (_film->frame_size ());
/* Pick up the film's audio processor (if any) via the normal change path */
97 	film_changed (Film::AUDIO_PROCESSOR);
/* Accurate seek to the start so that playback state is well-defined */
99 	seek (DCPTime (), true);
/* Rebuild _pieces from the playlist: create a decoder for each piece of content,
 * connect the decoder's output signals to our handlers, honour the
 * ignore-video/ignore-audio and play-referenced settings, record per-stream audio
 * state and note the periods covered by referenced DCP video/audio.
 */
103 Player::setup_pieces ()
107 	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files are missing or invalid */
109 		if (!i->paths_valid ()) {
113 		shared_ptr<Decoder> decoder = decoder_factory (i, _film->log());
114 		FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
117 		/* Not something that we can decode; e.g. Atmos content */
/* Honour requests to discard video / audio output entirely */
121 		if (decoder->video && _ignore_video) {
122 			decoder->video->set_ignore ();
125 		if (decoder->audio && _ignore_audio) {
126 			decoder->audio->set_ignore ();
/* If we are playing referenced content the DCP decoder must actually decode it */
129 		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
130 		if (dcp && _play_referenced) {
131 			dcp->set_decode_referenced ();
134 		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
135 		_pieces.push_back (piece);
/* Connect decoder outputs to our handlers; weak_ptr avoids keeping pieces alive */
137 		if (decoder->video) {
138 			decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
141 		if (decoder->audio) {
142 			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
145 		if (decoder->subtitle) {
146 			decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
147 			decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
148 			decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
/* Record the starting state of every audio stream */
152 	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
153 		if (i->content->audio) {
154 			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
155 				_stream_states[j] = StreamState (i, i->content->position ());
/* Note periods for which referenced DCPs supply video/audio, so we do not fill them */
160 	if (!_play_referenced) {
161 		BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
162 			shared_ptr<DCPContent> dc = dynamic_pointer_cast<DCPContent> (i->content);
/* NOTE(review): dc will be null for non-DCP content; presumably a null check exists
   on an elided line here -- confirm against the full file. */
164 			if (dc->reference_video()) {
165 				_no_video.push_back (DCPTimePeriod (dc->position(), dc->end()));
167 			if (dc->reference_audio()) {
168 				_no_audio.push_back (DCPTimePeriod (dc->position(), dc->end()));
/* Reset playback state: next emission is from time zero */
174 	_last_video_time = DCPTime ();
175 	_last_audio_time = DCPTime ();
176 	_have_valid_pieces = true;
/** Handler for Playlist::ContentChanged.
 *  Properties in the first group change the structure of the pieces and so
 *  invalidate _pieces; the second group presumably only requires listeners to be
 *  told (the if/else skeleton is partly outside this view -- confirm in full file).
 *  @param w Content that changed.
 *  @param property Property identifier that changed.
 *  @param frequent true if this change is likely to be frequent (e.g. during a drag).
 */
180 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
182 	shared_ptr<Content> c = w.lock ();
/* Properties which require the piece list to be rebuilt */
188 		property == ContentProperty::POSITION ||
189 		property == ContentProperty::LENGTH ||
190 		property == ContentProperty::TRIM_START ||
191 		property == ContentProperty::TRIM_END ||
192 		property == ContentProperty::PATH ||
193 		property == VideoContentProperty::FRAME_TYPE ||
194 		property == DCPContentProperty::NEEDS_ASSETS ||
195 		property == DCPContentProperty::NEEDS_KDM ||
196 		property == SubtitleContentProperty::COLOUR ||
197 		property == SubtitleContentProperty::OUTLINE ||
198 		property == SubtitleContentProperty::SHADOW ||
199 		property == SubtitleContentProperty::EFFECT_COLOUR ||
200 		property == FFmpegContentProperty::SUBTITLE_STREAM ||
201 		property == VideoContentProperty::COLOUR_CONVERSION
204 		_have_valid_pieces = false;
/* Properties which only change how existing pieces are rendered */
208 		property == SubtitleContentProperty::LINE_SPACING ||
209 		property == SubtitleContentProperty::OUTLINE_WIDTH ||
210 		property == SubtitleContentProperty::Y_SCALE ||
211 		property == SubtitleContentProperty::FADE_IN ||
212 		property == SubtitleContentProperty::FADE_OUT ||
213 		property == ContentProperty::VIDEO_FRAME_RATE ||
214 		property == SubtitleContentProperty::USE ||
215 		property == SubtitleContentProperty::X_OFFSET ||
216 		property == SubtitleContentProperty::Y_OFFSET ||
217 		property == SubtitleContentProperty::X_SCALE ||
218 		property == SubtitleContentProperty::FONTS ||
219 		property == VideoContentProperty::CROP ||
220 		property == VideoContentProperty::SCALE ||
221 		property == VideoContentProperty::FADE_IN ||
222 		property == VideoContentProperty::FADE_OUT
/** Set the size of the container into which video will be rendered and rebuild
 *  the cached black frame at that size.  No-op if the size is unchanged.
 *  @param s New container size.
 */
230 Player::set_video_container_size (dcp::Size s)
232 	if (s == _video_container_size) {
236 	_video_container_size = s;
/* Regenerate the black frame used for filler at the new size */
238 	_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
239 	_black_image->make_black ();
/* Handler for Playlist::Changed: the piece list is now stale and must be rebuilt
 * before the next use.  (Any listener notification is on elided lines.)
 */
245 Player::playlist_changed ()
247 	_have_valid_pieces = false;
/** Handler for Film::Changed.
 *  @param p The Film property that changed.
 */
252 Player::film_changed (Film::Property p)
254 	/* Here we should notice Film properties that affect our output, and
255 	alert listeners that our output now would be different to how it was
256 	last time we were run.
259 	if (p == Film::CONTAINER) {
261 	} else if (p == Film::VIDEO_FRAME_RATE) {
262 		/* Pieces contain a FrameRateChange which contains the DCP frame rate,
263 		so we need new pieces here.
265 		_have_valid_pieces = false;
267 	} else if (p == Film::AUDIO_PROCESSOR) {
268 		if (_film->audio_processor ()) {
/* Take our own clone of the processor, configured for the film's audio rate */
269 			_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
/** Scale and position image subtitles to fit the current _video_container_size.
 *  @param subs Image subtitles with fractional (0..1) rectangles.
 *  @return Positioned images in container pixel coordinates.
 */
275 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
277 	list<PositionImage> all;
279 	for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
284 		/* We will scale the subtitle up to fit _video_container_size */
285 		dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
287 		/* Then we need a corrective translation, consisting of two parts:
289 		* 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
290 		* rect.x * _video_container_size.width and rect.y * _video_container_size.height.
292 		* 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
293 		* (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
294 		* (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
296 		* Combining these two translations gives these expressions.
/* Convert/scale the subtitle image into RGB at the computed size (call partly elided) */
303 				dcp::YUV_TO_RGB_REC601,
304 				i->image->pixel_format (),
/* Position in container pixels, rounded to the nearest integer */
309 				lrint (_video_container_size.width * i->rectangle.x),
310 				lrint (_video_container_size.height * i->rectangle.y)
/** @return A new PlayerVideo holding a frame of black at the container size,
 *  using the first preset colour conversion.  Used as filler for gaps.
 */
319 shared_ptr<PlayerVideo>
320 Player::black_player_video_frame () const
322 	return shared_ptr<PlayerVideo> (
324 			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
327 			_video_container_size,
328 			_video_container_size,
331 			PresetColourConversion::all().front().conversion
337 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
339 DCPTime s = t - piece->content->position ();
340 s = min (piece->content->length_after_trim(), s);
341 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
343 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
344 then convert that ContentTime to frames at the content's rate. However this fails for
345 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
346 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
348 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
350 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
354 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
356 /* See comment in dcp_to_content_video */
357 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
358 return max (DCPTime (), d + piece->content->position ());
362 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
364 DCPTime s = t - piece->content->position ();
365 s = min (piece->content->length_after_trim(), s);
366 /* See notes in dcp_to_content_video */
367 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
371 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
373 /* See comment in dcp_to_content_video */
374 DCPTime const d = DCPTime::from_frames (f, _film->audio_frame_rate()) - DCPTime (piece->content->trim_start(), piece->frc);
375 return max (DCPTime (), d + piece->content->position ());
379 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
381 DCPTime s = t - piece->content->position ();
382 s = min (piece->content->length_after_trim(), s);
383 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
387 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
389 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/** @return All fonts used by subtitle content in the playlist.
 *  Rebuilds the piece list first if it is stale.
 */
392 list<shared_ptr<Font> >
393 Player::get_subtitle_fonts ()
395 	if (!_have_valid_pieces) {
399 	list<shared_ptr<Font> > fonts;
400 	BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
401 		if (p->content->subtitle) {
402 			/* XXX: things may go wrong if there are duplicate font IDs
403 			with different font files.
405 			list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
406 			copy (f.begin(), f.end(), back_inserter (fonts));
413 /** Set this player never to produce any video data */
415 Player::set_ignore_video ()
417 _ignore_video = true;
420 /** Set whether or not this player should always burn text subtitles into the image,
421 * regardless of the content settings.
422 * @param burn true to always burn subtitles, false to obey content settings.
425 Player::set_always_burn_subtitles (bool burn)
/* Only affects rendering, so the piece list does not need rebuilding */
427 	_always_burn_subtitles = burn;
434 _have_valid_pieces = false;
438 Player::set_play_referenced ()
440 _play_referenced = true;
441 _have_valid_pieces = false;
/** @return Details of any reel assets (picture/sound/subtitle) from DCP content in
 *  the playlist which are marked for reference rather than re-encoding, with their
 *  entry points / durations adjusted for the content's trims.
 */
444 list<ReferencedReelAsset>
445 Player::get_reel_assets ()
447 	list<ReferencedReelAsset> a;
449 	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
450 		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
/* A decoder is made just to read the reel structure of the DCP */
455 		scoped_ptr<DCPDecoder> decoder;
457 			decoder.reset (new DCPDecoder (j, _film->log()));
463 		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
/* Trims are expressed in frames at the content (DCP) rate; times in the film rate */
465 			DCPOMATIC_ASSERT (j->video_frame_rate ());
466 			double const cfr = j->video_frame_rate().get();
467 			Frame const trim_start = j->trim_start().frames_round (cfr);
468 			Frame const trim_end = j->trim_end().frames_round (cfr);
469 			int const ffr = _film->video_frame_rate ();
471 			DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
472 			if (j->reference_video ()) {
473 				shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
474 				DCPOMATIC_ASSERT (ra);
/* Narrow the asset to the trimmed part of the content */
475 				ra->set_entry_point (ra->entry_point() + trim_start);
476 				ra->set_duration (ra->duration() - trim_start - trim_end);
478 					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
482 			if (j->reference_audio ()) {
483 				shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
484 				DCPOMATIC_ASSERT (ra);
485 				ra->set_entry_point (ra->entry_point() + trim_start);
486 				ra->set_duration (ra->duration() - trim_start - trim_end);
488 					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
492 			if (j->reference_subtitle ()) {
493 				shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
494 				DCPOMATIC_ASSERT (ra);
495 				ra->set_entry_point (ra->entry_point() + trim_start);
496 				ra->set_duration (ra->duration() - trim_start - trim_end);
498 					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
502 			/* Assume that main picture duration is the length of the reel */
503 			offset += k->main_picture()->duration ();
/* Body of (presumably) Player::pass -- the signature is on elided lines; confirm in
 * the full file.  One "pass" advances the earliest decoder by one step, filling any
 * gaps in video with black / repeated frames and gaps in audio with silence, then
 * emits any audio that all streams have advanced past.
 */
513 	if (!_have_valid_pieces) {
/* Find the piece whose decoder is earliest on the DCP timeline */
517 	shared_ptr<Piece> earliest;
518 	DCPTime earliest_content;
520 	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
522 			DCPTime const t = content_time_to_dcp (i, i->decoder->position());
523 			if (!earliest || t < earliest_content) {
524 				earliest_content = t;
530 	/* Fill towards the next thing that might happen (or the end of the playlist). This is to fill gaps between content,
531 	NOT to fill gaps within content (the latter is done in ::video())
533 	DCPTime fill_towards = earliest ? earliest_content : _playlist->length().ceil(_film->video_frame_rate());
535 	/* Work out where to fill video from */
536 	optional<DCPTime> video_fill_from;
537 	if (_last_video_time) {
538 		/* Fill from the last video or seek time */
539 		video_fill_from = _last_video_time;
543 	/* Fill some black if we would emit before the earliest piece of content. This is so we act like a phantom
544 	Piece which emits black in spaces (we only emit if we are the earliest thing)
546 	if (video_fill_from && (!earliest || *video_fill_from < earliest_content) && ((fill_towards - *video_fill_from)) >= one_video_frame()) {
547 		list<DCPTimePeriod> p = subtract(DCPTimePeriod(*video_fill_from, *video_fill_from + one_video_frame()), _no_video);
549 			emit_video (black_player_video_frame(), p.front().from);
552 	} else if (_playlist->length() == DCPTime()) {
553 		/* Special case of an empty Film; just give one black frame */
554 		emit_video (black_player_video_frame(), DCPTime());
558 	optional<DCPTime> audio_fill_from;
559 	if (_last_audio_time) {
560 		/* Fill from the last audio or seek time */
561 		audio_fill_from = _last_audio_time;
/* Audio fill target is extended by the content's audio delay */
564 	DCPTime audio_fill_towards = fill_towards;
565 	if (earliest && earliest->content->audio) {
566 		audio_fill_towards += DCPTime::from_seconds (earliest->content->audio->delay() / 1000.0);
569 	if (audio_fill_from && audio_fill_from < audio_fill_towards) {
/* Fill at most one video frame's worth of silence per pass */
570 		DCPTimePeriod period (*audio_fill_from, audio_fill_towards);
571 		if (period.duration() > one_video_frame()) {
572 			period.to = period.from + one_video_frame();
574 		list<DCPTimePeriod> p = subtract(period, _no_audio);
576 			fill_audio (p.front());
/* Advance the earliest decoder; when it finishes, flush its resamplers */
582 		earliest->done = earliest->decoder->pass ();
583 		if (earliest->done && earliest->content->audio) {
584 			/* Flush the Player audio system for this piece */
585 			BOOST_FOREACH (AudioStreamPtr i, earliest->content->audio->streams()) {
586 				audio_flush (earliest, i);
591 	/* Emit any audio that is ready */
/* Only pull up to the point every live stream has been pushed to */
593 	DCPTime pull_to = _playlist->length ();
594 	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
595 		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
596 			pull_to = i->second.last_push_end;
600 	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
601 	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
602 		if (_last_audio_time && i->second < *_last_audio_time) {
603 			/* There has been an accurate seek and we have received some audio before the seek time;
606 			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
613 			if (_last_audio_time) {
614 				fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
617 		emit_audio (i->first, i->second);
/* Done when nothing was decoded and nothing was filled */
620 	return !earliest && !filled;
/** Collect all subtitles (image, and text rendered to images) that should be burnt
 *  into the frame at a given time, merged into a single image.
 *  @param time Time of the frame.
 *  @return Merged subtitle image, or empty if there is nothing to burn.
 */
623 optional<PositionImage>
624 Player::subtitles_for_frame (DCPTime time) const
626 	list<PositionImage> subtitles;
628 	BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
630 		/* Image subtitles */
631 		list<PositionImage> c = transform_image_subtitles (i.image);
632 		copy (c.begin(), c.end(), back_inserter (subtitles));
634 		/* Text subtitles (rendered to an image) */
635 		if (!i.text.empty ()) {
636 			list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
637 			copy (s.begin(), s.end(), back_inserter (subtitles));
641 	if (subtitles.empty ()) {
642 		return optional<PositionImage> ();
645 	return merge (subtitles);
/** Handler for video data arriving from a decoder.  Converts the content frame to a
 *  DCP time, discards out-of-range frames, fills any gap since the last emitted
 *  video (repeating the piece's last frame or using black), then builds and emits a
 *  PlayerVideo for this frame.
 *  @param wp Piece the video came from.
 *  @param video The video frame.
 */
649 Player::video (weak_ptr<Piece> wp, ContentVideo video)
651 	shared_ptr<Piece> piece = wp.lock ();
/* With a skip frame-rate change we throw away every other frame */
656 	FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
657 	if (frc.skip && (video.frame % 2) == 1) {
661 	/* Time and period of the frame we will emit */
662 	DCPTime const time = content_video_to_dcp (piece, video.frame);
663 	DCPTimePeriod const period (time, time + one_video_frame());
665 	/* Discard if it's outside the content's period or if it's before the last accurate seek */
667 		time < piece->content->position() ||
668 		time >= piece->content->end() ||
669 		(_last_video_time && time < *_last_video_time)) {
673 	/* Fill gaps that we discover now that we have some video which needs to be emitted */
675 	optional<DCPTime> fill_to;
676 	if (_last_video_time) {
677 		fill_to = _last_video_time;
681 		/* XXX: this may not work for 3D */
682 		BOOST_FOREACH (DCPTimePeriod i, subtract(DCPTimePeriod (*fill_to, time), _no_video)) {
683 			for (DCPTime j = i.from; j < i.to; j += one_video_frame()) {
/* Prefer repeating this piece's last frame; fall back to black */
684 				LastVideoMap::const_iterator k = _last_video.find (wp);
685 				if (k != _last_video.end ()) {
686 					emit_video (k->second, j);
688 					emit_video (black_player_video_frame(), j);
/* Build the PlayerVideo for this frame (constructor arguments partly elided) */
694 	_last_video[wp].reset (
697 			piece->content->video->crop (),
698 			piece->content->video->fade (video.frame),
699 			piece->content->video->scale().size (
700 				piece->content->video, _video_container_size, _film->frame_size ()
702 			_video_container_size,
705 			piece->content->video->colour_conversion ()
709 	emit_video (_last_video[wp], time);
/** Flush any audio remaining in a stream's resampler when its piece finishes,
 *  passing the result through the normal audio_transform path.
 *  @param piece Piece that has finished.
 *  @param stream Stream to flush.
 */
713 Player::audio_flush (shared_ptr<Piece> piece, AudioStreamPtr stream)
715 	shared_ptr<AudioContent> content = piece->content->audio;
716 	DCPOMATIC_ASSERT (content);
/* Look up (do not create) the resampler for this stream */
718 	shared_ptr<Resampler> r = resampler (content, stream, false);
723 	pair<shared_ptr<const AudioBuffers>, Frame> ro = r->flush ();
724 	if (ro.first->frames() == 0) {
728 	ContentAudio content_audio;
729 	content_audio.audio = ro.first;
730 	content_audio.frame = ro.second;
732 	/* Compute time in the DCP */
733 	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
735 	audio_transform (content, stream, content_audio, time);
738 /** Do our common processing on some audio */
/* Applies gain, maps content channels to DCP channels, runs any audio processor,
 * pads any gap caused by audio delay, then pushes the result into the merger and
 * records how far this stream has been pushed.
 */
740 Player::audio_transform (shared_ptr<AudioContent> content, AudioStreamPtr stream, ContentAudio content_audio, DCPTime time)
742 	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Gain: copy the buffers before modifying, as they may be shared */
746 	if (content->gain() != 0) {
747 		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
748 		gain->apply_gain (content->gain ());
749 		content_audio.audio = gain;
/* Remap content channels onto the film's DCP channels */
754 	shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), content_audio.audio->frames()));
755 	dcp_mapped->make_silent ();
757 	AudioMapping map = stream->mapping ();
758 	for (int i = 0; i < map.input_channels(); ++i) {
759 		for (int j = 0; j < dcp_mapped->channels(); ++j) {
760 			if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
761 				dcp_mapped->accumulate_channel (
762 					content_audio.audio.get(),
764 					static_cast<dcp::Channel> (j),
765 					map.get (i, static_cast<dcp::Channel> (j))
771 	content_audio.audio = dcp_mapped;
775 	if (_audio_processor) {
776 		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
779 	/* Pad any gap which may be caused by audio delay */
781 	if (_last_audio_time) {
782 		fill_audio (DCPTimePeriod (*_last_audio_time, time));
/* Push into the merger and note how far this stream now extends */
787 	_audio_merger.push (content_audio.audio, time);
788 	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
789 	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/** Handler for audio data arriving from a decoder.  Resamples if the stream's rate
 *  differs from the content's target rate, converts to DCP time, trims anything
 *  outside the content's period, then hands off to audio_transform.
 *  @param wp Piece the audio came from.
 *  @param stream Stream it belongs to.
 *  @param content_audio The audio data.
 */
793 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
795 	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
797 	shared_ptr<Piece> piece = wp.lock ();
802 	shared_ptr<AudioContent> content = piece->content->audio;
803 	DCPOMATIC_ASSERT (content);
/* Resample if this stream is not already at the content's target rate */
806 	if (stream->frame_rate() != content->resampled_frame_rate()) {
807 		shared_ptr<Resampler> r = resampler (content, stream, true);
808 		pair<shared_ptr<const AudioBuffers>, Frame> ro = r->run (content_audio.audio, content_audio.frame);
809 		if (ro.first->frames() == 0) {
812 		content_audio.audio = ro.first;
813 		content_audio.frame = ro.second;
816 	/* Compute time in the DCP */
817 	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
818 	/* And the end of this block in the DCP */
819 	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
821 	/* Remove anything that comes before the start or after the end of the content */
822 	if (time < piece->content->position()) {
823 		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
825 			/* This audio is entirely discarded */
828 		content_audio.audio = cut.first;
830 	} else if (time > piece->content->end()) {
833 	} else if (end > piece->content->end()) {
/* Block straddles the end of the content: keep only the part inside */
834 		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
835 		if (remaining_frames == 0) {
838 		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
839 		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
840 		content_audio.audio = cut;
843 	audio_transform (content, stream, content_audio, time);
/** Handler for an image subtitle starting.  Applies the content's offsets and
 *  scales, then registers the subtitle as active from the corresponding DCP time.
 *  @param wp Piece the subtitle came from.
 *  @param subtitle The subtitle.
 */
847 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
849 	shared_ptr<Piece> piece = wp.lock ();
854 	/* Apply content's subtitle offsets */
855 	subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
856 	subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
858 	/* Apply content's subtitle scale */
859 	subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
860 	subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
862 	/* Apply a corrective translation to keep the subtitle centred after that scale */
863 	subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
864 	subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
867 	ps.image.push_back (subtitle.sub);
868 	DCPTime from (content_time_to_dcp (piece, subtitle.from()));
870 	_active_subtitles.add_from (wp, ps, from);
/** Handler for a text subtitle starting.  Applies the content's position offsets
 *  and scales (size and aspect) to each SubtitleString, then registers the subtitle
 *  as active from the corresponding DCP time.
 *  @param wp Piece the subtitle came from.
 *  @param subtitle The subtitle.
 */
874 Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
876 	shared_ptr<Piece> piece = wp.lock ();
882 	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
884 	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
885 		s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
886 		s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
887 		float const xs = piece->content->subtitle->x_scale();
888 		float const ys = piece->content->subtitle->y_scale();
889 		float size = s.size();
891 		/* Adjust size to express the common part of the scaling;
892 		e.g. if xs = ys = 0.5 we scale size by 2.
894 		if (xs > 1e-5 && ys > 1e-5) {
895 			size *= 1 / min (1 / xs, 1 / ys);
899 		/* Then express aspect ratio changes */
900 		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
901 			s.set_aspect_adjust (xs / ys);
/* dcp::Time is constructed with millisecond accuracy here */
904 		s.set_in (dcp::Time(from.seconds(), 1000));
905 		ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
906 		ps.add_fonts (piece->content->subtitle->fonts ());
909 	_active_subtitles.add_from (wp, ps, from);
/** Handler for a subtitle finishing.  Closes the active subtitle period and, if the
 *  subtitle is not being burnt in, emits it to listeners via the Subtitle signal.
 *  @param wp Piece the subtitle came from.
 *  @param to Time (in the content) at which the subtitle stops.
 */
913 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
915 	if (!_active_subtitles.have (wp)) {
919 	shared_ptr<Piece> piece = wp.lock ();
924 	DCPTime const dcp_to = content_time_to_dcp (piece, to);
926 	pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
/* Only emit as a separate subtitle if it is in use and not being burnt in */
928 	if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
929 		Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
/** Seek the player to a time.
 *  Flushes processors/resamplers and clears merged audio and active subtitles, then
 *  seeks each decoder: to zero for content after the target, to the corresponding
 *  content time for content covering the target.
 *  @param time Time to seek to.
 *  @param accurate true to seek exactly; playback state is then pinned to @time,
 *  otherwise (per the elided else-branch) the last video/audio times are cleared.
 */
934 Player::seek (DCPTime time, bool accurate)
936 	if (_audio_processor) {
937 		_audio_processor->flush ();
940 	for (ResamplerMap::iterator i = _resamplers.begin(); i != _resamplers.end(); ++i) {
945 	_audio_merger.clear ();
946 	_active_subtitles.clear ();
948 	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
949 		if (time < i->content->position()) {
950 			/* Before; seek to 0 */
951 			i->decoder->seek (ContentTime(), accurate);
953 		} else if (i->content->position() <= time && time < i->content->end()) {
954 			/* During; seek to position */
955 			i->decoder->seek (dcp_to_content_time (i, time), accurate);
958 			/* After; this piece is done */
964 		_last_video_time = time;
965 		_last_audio_time = time;
967 		_last_video_time = optional<DCPTime>();
968 		_last_audio_time = optional<DCPTime>();
/** Look up (and optionally create) the resampler for a content/stream pair.
 *  @param content Audio content the stream belongs to.
 *  @param stream Stream to resample.
 *  @param create true to create a resampler if none exists; if false and none
 *  exists, a null pointer is returned.
 *  @return The resampler, or null.
 */
972 shared_ptr<Resampler>
973 Player::resampler (shared_ptr<const AudioContent> content, AudioStreamPtr stream, bool create)
975 	ResamplerMap::const_iterator i = _resamplers.find (make_pair (content, stream));
976 	if (i != _resamplers.end ()) {
981 		return shared_ptr<Resampler> ();
/* Log the creation for debugging sample-rate issues */
985 		"Creating new resampler from %1 to %2 with %3 channels",
986 		stream->frame_rate(),
987 		content->resampled_frame_rate(),
991 	shared_ptr<Resampler> r (
992 		new Resampler (stream->frame_rate(), content->resampled_frame_rate(), stream->channels())
/* Cache for subsequent calls */
995 	_resamplers[make_pair(content, stream)] = r;
/** Emit a frame of video at a given time, burning in any subtitles due at that
 *  time.  (The actual signal emission is on elided lines.)  Advances
 *  _last_video_time and expires old subtitles once both eyes of a frame are out.
 *  @param pv Video frame to emit.
 *  @param time Time of the frame.
 */
1000 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1002 	optional<PositionImage> subtitles = subtitles_for_frame (time);
1004 		pv->set_subtitle (subtitles.get ());
/* Only advance time after the right eye (or a 2D frame) has been emitted */
1009 	if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1010 		_last_video_time = time + one_video_frame();
1011 		_active_subtitles.clear_before (time);
/** Emit a block of audio at a given time and advance _last_audio_time past it.
 *  (The actual signal emission is on elided lines.)
 *  @param data Audio data.
 *  @param time Time of the first frame of @data.
 */
1016 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1019 	_last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate ());
/** Emit silence over a period, skipping parts covered by referenced DCP audio.
 *  Silence is emitted in blocks of at most half a second.
 *  @param period Period to fill.
 */
1023 Player::fill_audio (DCPTimePeriod period)
1025 	if (period.from == period.to) {
1029 	BOOST_FOREACH (DCPTimePeriod i, subtract(period, _no_audio)) {
1032 			DCPTime block = min (DCPTime::from_seconds (0.5), i.to - t);
1033 			Frame const samples = block.frames_round(_film->audio_frame_rate());
1035 				shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1036 				silence->make_silent ();
1037 				emit_audio (silence, t);
1045 Player::one_video_frame () const
1047 return DCPTime::from_frames (1, _film->video_frame_rate ());
/** Discard the part of an audio block which falls before a given time.
 *  @param audio Audio block.
 *  @param time Time of the first frame of @audio.
 *  @param discard_to Time up to which audio should be thrown away.
 *  @return Remaining audio and its new start time; a null buffer if everything
 *  was discarded.
 */
1050 pair<shared_ptr<AudioBuffers>, DCPTime>
1051 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1053 	DCPTime const discard_time = discard_to - time;
1054 	Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1055 	Frame remaining_frames = audio->frames() - discard_frames;
1056 	if (remaining_frames <= 0) {
1057 		return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1059 	shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1060 	cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1061 	return make_pair(cut, time + discard_time);