2 Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
4 This file is part of DCP-o-matic.
6 DCP-o-matic is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 DCP-o-matic is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with DCP-o-matic. If not, see <http://www.gnu.org/licenses/>.
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
28 #include "raw_image_proxy.h"
31 #include "render_text.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
/* Identifiers for the properties that the Player reports through its
   Changed signal, so listeners can tell what kind of change occurred. */
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
/* Construct a Player for a film/playlist pair.  Connects to the film's and
   playlist's change signals so the player can rebuild its state when content
   changes, then performs an initial accurate seek to time zero.
   NOTE(review): this excerpt is missing lines (part of the initialiser list
   and the body braces) — confirm against the full source. */
87 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
89 , _playlist (playlist)
91 , _ignore_video (false)
92 , _ignore_audio (false)
93 , _ignore_text (false)
94 , _always_burn_open_subtitles (false)
96 , _play_referenced (false)
97 , _audio_merger (_film->audio_frame_rate())
/* Watch for film/playlist changes; connections are kept in members so they
   are disconnected automatically when the Player is destroyed. */
100 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
101 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
102 _playlist_content_may_change_connection = _playlist->ContentMayChange.connect (bind(&Player::playlist_content_may_change, this));
103 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind(&Player::playlist_content_changed, this, _1, _2, _3));
104 _playlist_content_not_changed_connection = _playlist->ContentNotChanged.connect (bind(&Player::playlist_content_not_changed, this));
/* Start with the film's own frame size as our output container size. */
105 set_video_container_size (_film->frame_size ());
/* Pick up any audio processor the film already has configured. */
107 film_changed (Film::AUDIO_PROCESSOR);
/* Accurate seek to the start so the first pass() emits from time zero. */
110 seek (DCPTime (), true);
/* Public entry point to rebuild the Piece list: takes the state mutex and
   delegates to setup_pieces_unlocked(). */
119 Player::setup_pieces ()
121 boost::mutex::scoped_lock lm (_mutex);
122 setup_pieces_unlocked ();
/* Rebuild _pieces from the playlist: create a decoder for each piece of
   content, wire decoder output signals to the Player's handlers, and reset
   the black/silence fillers and emission clocks.  Caller must hold _mutex.
   NOTE(review): this excerpt is missing lines (e.g. some closing braces and
   `continue`s) — confirm control flow against the full source. */
126 Player::setup_pieces_unlocked ()
/* A Shuffler re-orders 3D left/right frames that may arrive out of sequence. */
131 _shuffler = new Shuffler();
132 _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
134 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Skip content whose files are missing/invalid. */
136 if (!i->paths_valid ()) {
140 if (_ignore_video && _ignore_audio && i->text.empty()) {
141 /* We're only interested in text and this content has none */
145 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
146 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
149 /* Not something that we can decode; e.g. Atmos content */
/* Tell each sub-decoder to discard output we have been asked to ignore. */
153 if (decoder->video && _ignore_video) {
154 decoder->video->set_ignore (true);
157 if (decoder->audio && _ignore_audio) {
158 decoder->audio->set_ignore (true);
162 BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
163 i->set_ignore (true);
/* DCP content may reference assets in another DCP; pass that mode through. */
167 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
169 dcp->set_decode_referenced (_play_referenced);
170 if (_play_referenced) {
171 dcp->set_forced_reduction (_dcp_decode_reduction);
175 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
176 _pieces.push_back (piece);
/* Route video either via the Shuffler (3D L/R streams) or straight to us.
   Note weak_ptr<Piece> is bound so the connection does not keep Piece alive. */
178 if (decoder->video) {
179 if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
180 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
181 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
183 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
187 if (decoder->audio) {
188 decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
/* Connect text (subtitle/caption) start/stop signals for every text decoder. */
191 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
193 while (j != decoder->text.end()) {
194 (*j)->BitmapStart.connect (
195 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
197 (*j)->PlainStart.connect (
198 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
201 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1, _2)
/* Record, per audio stream, which piece it belongs to and where audio has
   been pushed up to (initially the content's position). */
208 _stream_states.clear ();
209 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
210 if (i->content->audio) {
211 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
212 _stream_states[j] = StreamState (i, i->content->position ());
/* _black/_silent describe the periods with no video/audio content, which
   pass() fills with black frames and silence respectively. */
217 _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
218 _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
/* Reset the emission clocks to the start of the DCP. */
220 _last_video_time = DCPTime ();
221 _last_video_eyes = EYES_BOTH;
222 _last_audio_time = DCPTime ();
/* Called when the playlist signals that content is about to change. */
227 Player::playlist_content_may_change ()
230 boost::mutex::scoped_lock lm (_mutex);
231 /* The player content is probably about to change, so we can't carry on
232 until that has happened and we've rebuilt our pieces. Stop pass()
233 and seek() from working until then.
/* Called when a change to some Content has been completed.
   @param w the content that changed (weak, may have expired)
   @param property which property changed
   @param frequent true if this change is likely to happen often (e.g. during
   a drag), passed through to listeners so they can throttle updates.
   Re-emits Changed for any property that affects the player's output.
   NOTE(review): the excerpt omits the lines between here and the property
   list (likely a rebuild of pieces and the `if (` opening the test). */
242 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
244 /* A change in our content has gone through. Re-build our pieces and signal
245 it to anybody that is interested.
248 shared_ptr<Content> c = w.lock ();
/* Any of these properties changes what we would emit, so propagate. */
256 property == ContentProperty::POSITION ||
257 property == ContentProperty::LENGTH ||
258 property == ContentProperty::TRIM_START ||
259 property == ContentProperty::TRIM_END ||
260 property == ContentProperty::PATH ||
261 property == VideoContentProperty::FRAME_TYPE ||
262 property == VideoContentProperty::COLOUR_CONVERSION ||
263 property == AudioContentProperty::STREAMS ||
264 property == DCPContentProperty::NEEDS_ASSETS ||
265 property == DCPContentProperty::NEEDS_KDM ||
266 property == DCPContentProperty::CPL ||
267 property == TextContentProperty::COLOUR ||
268 property == TextContentProperty::EFFECT ||
269 property == TextContentProperty::EFFECT_COLOUR ||
270 property == FFmpegContentProperty::SUBTITLE_STREAM ||
271 property == FFmpegContentProperty::FILTERS ||
272 property == TextContentProperty::LINE_SPACING ||
273 property == TextContentProperty::OUTLINE_WIDTH ||
274 property == TextContentProperty::Y_SCALE ||
275 property == TextContentProperty::FADE_IN ||
276 property == TextContentProperty::FADE_OUT ||
277 property == ContentProperty::VIDEO_FRAME_RATE ||
278 property == TextContentProperty::USE ||
279 property == TextContentProperty::X_OFFSET ||
280 property == TextContentProperty::Y_OFFSET ||
281 property == TextContentProperty::X_SCALE ||
282 property == TextContentProperty::FONTS ||
283 property == TextContentProperty::TYPE ||
284 property == VideoContentProperty::CROP ||
285 property == VideoContentProperty::SCALE ||
286 property == VideoContentProperty::FADE_IN ||
287 property == VideoContentProperty::FADE_OUT
290 Changed (property, frequent);
/* Called when a change flagged by ContentMayChange did not happen after all. */
295 Player::playlist_content_not_changed ()
297 /* A possible content change did end up happening for some reason */
/* Set the size of the "container" into which output video will be scaled.
   Rebuilds the cached black frame at the new size and notifies listeners.
   A no-op if the size is unchanged. */
302 Player::set_video_container_size (dcp::Size s)
305 boost::mutex::scoped_lock lm (_mutex);
307 if (s == _video_container_size) {
311 _video_container_size = s;
/* Pre-render one black RGB frame at the new size for gap filling. */
313 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
314 _black_image->make_black ();
317 Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
/* Called when the playlist as a whole changes; just tell our listeners. */
321 Player::playlist_changed ()
324 Changed (PlayerProperty::PLAYLIST, false);
/* Called when a Film property changes.
   @param p the property that changed. */
328 Player::film_changed (Film::Property p)
330 /* Here we should notice Film properties that affect our output, and
331 alert listeners that our output now would be different to how it was
332 last time we were run.
335 if (p == Film::CONTAINER) {
336 Changed (PlayerProperty::FILM_CONTAINER, false);
337 } else if (p == Film::VIDEO_FRAME_RATE) {
338 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
339 so we need new pieces here.
342 Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
343 } else if (p == Film::AUDIO_PROCESSOR) {
/* Clone the film's processor at our audio rate (if one is configured). */
344 if (_film->audio_processor ()) {
345 boost::mutex::scoped_lock lm (_mutex);
346 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
348 } else if (p == Film::AUDIO_CHANNELS) {
/* Channel count changed, so any merged-but-unemitted audio is now invalid. */
349 boost::mutex::scoped_lock lm (_mutex);
350 _audio_merger.clear ();
/* Convert a list of bitmap subtitles into PositionImages scaled and placed
   for _video_container_size.  Rectangle coordinates are fractional (0..1)
   relative to the container, hence the multiplications below.
   NOTE(review): excerpt is missing lines (the scale call and the return). */
355 Player::transform_bitmap_texts (list<BitmapText> subs) const
357 list<PositionImage> all;
359 for (list<BitmapText>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
364 /* We will scale the subtitle up to fit _video_container_size */
365 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
371 dcp::YUV_TO_RGB_REC601,
372 i->image->pixel_format (),
/* Position is the fractional rectangle origin scaled to container pixels. */
377 lrint (_video_container_size.width * i->rectangle.x),
378 lrint (_video_container_size.height * i->rectangle.y)
/* @return a PlayerVideo wrapping the cached black image, used to fill
   periods with no video content.
   @param eyes which eye(s) this frame is for (3D support). */
387 shared_ptr<PlayerVideo>
388 Player::black_player_video_frame (Eyes eyes) const
390 return shared_ptr<PlayerVideo> (
392 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
395 _video_container_size,
396 _video_container_size,
399 PresetColourConversion::all().front().conversion,
/* No originating content or frame index for a synthesised black frame. */
400 boost::weak_ptr<Content>(),
401 boost::optional<Frame>()
/* Convert a DCP time to a video frame index within a piece of content,
   clamping to the content's trimmed extent. */
407 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
409 DCPTime s = t - piece->content->position ();
410 s = min (piece->content->length_after_trim(), s);
411 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
413 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
414 then convert that ContentTime to frames at the content's rate. However this fails for
415 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
416 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
418 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
420 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video(): convert a content video frame index
   to the DCP time at which it will be shown. */
424 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
426 /* See comment in dcp_to_content_video */
427 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
428 return d + piece->content->position();
/* Convert a DCP time to an audio frame index (at the film's audio rate)
   within a piece of content, clamping to the content's trimmed extent. */
432 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
434 DCPTime s = t - piece->content->position ();
435 s = min (piece->content->length_after_trim(), s);
436 /* See notes in dcp_to_content_video */
437 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Inverse of dcp_to_resampled_audio(): convert an audio frame index (at the
   film's audio rate) to the DCP time at which it will be played. */
441 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
443 /* See comment in dcp_to_content_video */
444 return DCPTime::from_frames (f, _film->audio_frame_rate())
445 - DCPTime (piece->content->trim_start(), piece->frc)
446 + piece->content->position();
/* Convert a DCP time to a ContentTime within a piece of content, taking the
   frame-rate change and start trim into account; clamped at zero. */
450 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
452 DCPTime s = t - piece->content->position ();
453 s = min (piece->content->length_after_trim(), s);
454 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Inverse of dcp_to_content_time(): convert a ContentTime within a piece to
   the corresponding DCP time; clamped at zero. */
458 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
460 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
/* Collect the fonts used by every piece of text content in the playlist.
   @return fonts, possibly with duplicates (no de-duplication is done here). */
463 list<shared_ptr<Font> >
464 Player::get_subtitle_fonts ()
466 boost::mutex::scoped_lock lm (_mutex);
468 list<shared_ptr<Font> > fonts;
469 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
470 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
471 /* XXX: things may go wrong if there are duplicate font IDs
472 with different font files.
474 list<shared_ptr<Font> > f = j->fonts ();
475 copy (f.begin(), f.end(), back_inserter (fonts));
482 /** Set this player never to produce any video data */
484 Player::set_ignore_video ()
486 boost::mutex::scoped_lock lm (_mutex);
487 _ignore_video = true;
/* Pieces must be rebuilt so their video decoders are told to ignore. */
488 setup_pieces_unlocked ();
/* Set this player never to produce any audio data. */
492 Player::set_ignore_audio ()
494 boost::mutex::scoped_lock lm (_mutex);
495 _ignore_audio = true;
/* Pieces must be rebuilt so their audio decoders are told to ignore. */
496 setup_pieces_unlocked ();
/* Set this player never to produce any text (subtitle/caption) data.
   NOTE(review): excerpt appears to omit the line setting _ignore_text. */
500 Player::set_ignore_text ()
502 boost::mutex::scoped_lock lm (_mutex);
504 setup_pieces_unlocked ();
507 /** Set the player to always burn open texts into the image regardless of the content settings */
509 Player::set_always_burn_open_subtitles ()
511 boost::mutex::scoped_lock lm (_mutex);
512 _always_burn_open_subtitles = true;
515 /** Sets up the player to be faster, possibly at the expense of quality */
/* NOTE(review): the function signature line is missing from this excerpt;
   presumably this is Player::set_fast() setting _fast — confirm. */
519 boost::mutex::scoped_lock lm (_mutex);
521 setup_pieces_unlocked ();
/* Ask the player to decode and play assets that DCP content merely
   references, rather than skipping them. */
525 Player::set_play_referenced ()
527 boost::mutex::scoped_lock lm (_mutex);
528 _play_referenced = true;
/* Rebuild pieces so DCP decoders pick up the new referenced-decode flag. */
529 setup_pieces_unlocked ();
/* Gather the reel assets (picture/sound/subtitle/caption) that DCP content
   in the playlist asks to be referenced rather than re-encoded, with the
   DCP time period each will occupy in the output. */
532 list<ReferencedReelAsset>
533 Player::get_reel_assets ()
535 /* Does not require a lock on _mutex as it's only called from DCPEncoder */
537 list<ReferencedReelAsset> a;
539 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
/* Only DCP content can reference assets; skip everything else. */
540 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
545 scoped_ptr<DCPDecoder> decoder;
547 decoder.reset (new DCPDecoder (j, _film->log(), false));
553 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
555 DCPOMATIC_ASSERT (j->video_frame_rate ());
/* cfr = content frame rate, ffr = film (output) frame rate; trims are
   expressed in content frames. */
556 double const cfr = j->video_frame_rate().get();
557 Frame const trim_start = j->trim_start().frames_round (cfr);
558 Frame const trim_end = j->trim_end().frames_round (cfr);
559 int const ffr = _film->video_frame_rate ();
/* `offset' accumulates reel lengths so each reel is placed after the last. */
561 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
/* For each asset type that is referenced: apply the trims to the asset's
   entry point/duration, then record it with its occupied period. */
562 if (j->reference_video ()) {
563 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
564 DCPOMATIC_ASSERT (ra);
565 ra->set_entry_point (ra->entry_point() + trim_start);
566 ra->set_duration (ra->duration() - trim_start - trim_end);
568 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
572 if (j->reference_audio ()) {
573 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
574 DCPOMATIC_ASSERT (ra);
575 ra->set_entry_point (ra->entry_point() + trim_start);
576 ra->set_duration (ra->duration() - trim_start - trim_end);
578 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
582 if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
583 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
584 DCPOMATIC_ASSERT (ra);
585 ra->set_entry_point (ra->entry_point() + trim_start);
586 ra->set_duration (ra->duration() - trim_start - trim_end);
588 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
592 if (j->reference_text (TEXT_CLOSED_CAPTION)) {
593 shared_ptr<dcp::ReelAsset> ra = k->closed_caption ();
594 DCPOMATIC_ASSERT (ra);
595 ra->set_entry_point (ra->entry_point() + trim_start);
596 ra->set_duration (ra->duration() - trim_start - trim_end);
598 ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
602 /* Assume that main picture duration is the length of the reel */
603 offset += k->main_picture()->duration ();
/* NOTE(review): the signature line is missing from this excerpt; this is
   presumably Player::pass() — advance playback by emitting the next chunk
   of video/audio (or filler black/silence), choosing whichever source is
   furthest behind.  Confirm against the full source. */
613 boost::mutex::scoped_lock lm (_mutex);
616 /* We can't pass in this state */
620 if (_playlist->length() == DCPTime()) {
621 /* Special case of an empty Film; just give one black frame */
622 emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
626 /* Find the decoder or empty which is farthest behind where we are and make it emit some data */
628 shared_ptr<Piece> earliest_content;
629 optional<DCPTime> earliest_time;
631 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
/* Where this piece's decoder currently is, expressed in DCP time. */
636 DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
637 if (t > i->content->end()) {
641 /* Given two choices at the same time, pick the one with texts so we see it before
644 if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
646 earliest_content = i;
/* Decide what to emit: the earliest content, or black/silence filler if one
   of those is further behind. */
660 if (earliest_content) {
664 if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
665 earliest_time = _black.position ();
669 if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
670 earliest_time = _silent.position ();
676 earliest_content->done = earliest_content->decoder->pass ();
/* Filler black: one frame per call, then advance the black position. */
679 emit_video (black_player_video_frame(EYES_BOTH), _black.position());
680 _black.set_position (_black.position() + one_video_frame());
/* Filler silence: at most one video frame's worth per call. */
684 DCPTimePeriod period (_silent.period_at_position());
685 if (_last_audio_time) {
686 /* Sometimes the thing that happened last finishes fractionally before
687 this silence. Bodge the start time of the silence to fix it. I'm
688 not sure if this is the right solution --- maybe the last thing should
689 be padded `forward' rather than this thing padding `back'.
691 period.from = min(period.from, *_last_audio_time);
693 if (period.duration() > one_video_frame()) {
694 period.to = period.from + one_video_frame();
697 _silent.set_position (period.to);
705 /* Emit any audio that is ready */
707 /* Work out the time before which the audio is definitely all here. This is the earliest last_push_end of one
708 of our streams, or the position of the _silent.
710 DCPTime pull_to = _film->length ();
711 for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
712 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
713 pull_to = i->second.last_push_end;
716 if (!_silent.done() && _silent.position() < pull_to) {
717 pull_to = _silent.position();
/* Pull merged audio up to pull_to and emit it in order, discarding anything
   before the last emission and filling any gap with silence. */
720 list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
721 for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
722 if (_last_audio_time && i->second < *_last_audio_time) {
723 /* This new data comes before the last we emitted (or the last seek); discard it */
724 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
729 } else if (_last_audio_time && i->second > *_last_audio_time) {
730 /* There's a gap between this data and the last we emitted; fill with silence */
731 fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
734 emit_audio (i->first, i->second);
/* Flush any video frames still sitting in the delay queue. */
739 for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
740 do_emit_video(i->first, i->second);
747 /** @return Open subtitles for the frame at the given time, converted to images */
748 optional<PositionImage>
749 Player::open_subtitles_for_frame (DCPTime time) const
751 list<PositionImage> captions;
752 int const vfr = _film->video_frame_rate();
/* Ask the active open-subtitle store for everything burnt into the
   one-frame period starting at `time'. */
756 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
759 /* Bitmap subtitles */
760 list<PositionImage> c = transform_bitmap_texts (j.bitmap);
761 copy (c.begin(), c.end(), back_inserter (captions));
763 /* String subtitles (rendered to an image) */
764 if (!j.string.empty ()) {
765 list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
766 copy (s.begin(), s.end(), back_inserter (captions));
/* Nothing to show for this frame. */
770 if (captions.empty ()) {
771 return optional<PositionImage> ();
/* Flatten all captions into a single positioned image. */
774 return merge (captions);
/* Handler for video data arriving from a piece's decoder.
   @param wp the piece the data came from (weak; ignored if expired)
   @param video the decoded frame and its metadata.
   Fills any gap since the last emitted frame (with a repeat of the previous
   frame, or black), then emits this frame (repeated as the frame-rate
   change requires).
   NOTE(review): excerpt is missing lines (early returns, some braces and
   the PlayerVideo constructor arguments) — confirm against full source. */
778 Player::video (weak_ptr<Piece> wp, ContentVideo video)
780 shared_ptr<Piece> piece = wp.lock ();
/* When the content rate is higher than the DCP rate we skip alternate
   frames; drop the odd ones here. */
785 FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
786 if (frc.skip && (video.frame % 2) == 1) {
790 /* Time of the first frame we will emit */
791 DCPTime const time = content_video_to_dcp (piece, video.frame);
793 /* Discard if it's before the content's period or the last accurate seek. We can't discard
794 if it's after the content's period here as in that case we still need to fill any gap between
795 `now' and the end of the content's period.
797 if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
801 /* Fill gaps that we discover now that we have some video which needs to be emitted.
802 This is where we need to fill to.
804 DCPTime fill_to = min (time, piece->content->end());
806 if (_last_video_time) {
807 DCPTime fill_from = max (*_last_video_time, piece->content->position());
/* Last frame we emitted for this piece, if any, to repeat into the gap. */
808 LastVideoMap::const_iterator last = _last_video.find (wp);
809 if (_film->three_d()) {
/* 3D: fill eye-by-eye so left/right stay interleaved correctly. */
810 Eyes fill_to_eyes = video.eyes;
811 if (fill_to == piece->content->end()) {
812 /* Don't fill after the end of the content */
813 fill_to_eyes = EYES_LEFT;
815 DCPTime j = fill_from;
816 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
817 if (eyes == EYES_BOTH) {
820 while (j < fill_to || eyes != fill_to_eyes) {
821 if (last != _last_video.end()) {
/* Repeat the previous frame for this eye rather than going to black. */
822 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
823 copy->set_eyes (eyes);
824 emit_video (copy, j);
826 emit_video (black_player_video_frame(eyes), j);
/* Only advance time after the right eye; left+right share a frame time. */
828 if (eyes == EYES_RIGHT) {
829 j += one_video_frame();
831 eyes = increment_eyes (eyes);
/* 2D: simple frame-by-frame fill with repeat-or-black. */
834 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
835 if (last != _last_video.end()) {
836 emit_video (last->second, j);
838 emit_video (black_player_video_frame(EYES_BOTH), j);
/* Wrap the decoded image in a PlayerVideo carrying crop/fade/scale and
   colour-conversion settings, and remember it for future gap filling. */
844 _last_video[wp].reset (
847 piece->content->video->crop (),
848 piece->content->video->fade (video.frame),
849 piece->content->video->scale().size (
850 piece->content->video, _video_container_size, _film->frame_size ()
852 _video_container_size,
855 piece->content->video->colour_conversion(),
/* Emit the frame, repeated as required by the frame-rate change, but never
   past the end of the content. */
862 for (int i = 0; i < frc.repeat; ++i) {
863 if (t < piece->content->end()) {
864 emit_video (_last_video[wp], t);
866 t += one_video_frame ();
/* Handler for audio data arriving from a piece's decoder.
   @param wp the piece the data came from (weak; ignored if expired)
   @param stream which of the content's audio streams this is
   @param content_audio the decoded samples and their content frame index.
   Trims the data to the content's period, applies gain/remap/processing and
   pushes it into the merger. */
871 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
873 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
875 shared_ptr<Piece> piece = wp.lock ();
880 shared_ptr<AudioContent> content = piece->content->audio;
881 DCPOMATIC_ASSERT (content);
883 /* Compute time in the DCP */
884 DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
885 /* And the end of this block in the DCP */
886 DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
888 /* Remove anything that comes before the start or after the end of the content */
889 if (time < piece->content->position()) {
890 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
892 /* This audio is entirely discarded */
895 content_audio.audio = cut.first;
897 } else if (time > piece->content->end()) {
/* Starts after the content ends: nothing to do (presumably dropped). */
900 } else if (end > piece->content->end()) {
/* Overlaps the end of the content: keep only the part inside it. */
901 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
902 if (remaining_frames == 0) {
905 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
906 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
907 content_audio.audio = cut;
910 DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
/* Apply the content's gain setting, if any. */
914 if (content->gain() != 0) {
915 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
916 gain->apply_gain (content->gain ());
917 content_audio.audio = gain;
/* Remap the stream's channels into the film's channel layout. */
922 content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
/* Run any configured audio processor (e.g. upmixer). */
926 if (_audio_processor) {
927 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
/* Queue for ordered emission and record how far this stream has pushed. */
932 _audio_merger.push (content_audio.audio, time);
933 DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
934 _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
/* Handler for the start of a bitmap subtitle from a text decoder: apply the
   content's offset/scale settings and add it to the active-texts store. */
938 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
940 shared_ptr<Piece> piece = wp.lock ();
941 shared_ptr<const TextContent> text = wc.lock ();
942 if (!piece || !text) {
946 /* Apply content's subtitle offsets */
947 subtitle.sub.rectangle.x += text->x_offset ();
948 subtitle.sub.rectangle.y += text->y_offset ();
950 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
951 subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
952 subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
954 /* Apply content's subtitle scale */
955 subtitle.sub.rectangle.width *= text->x_scale ();
956 subtitle.sub.rectangle.height *= text->y_scale ();
959 ps.bitmap.push_back (subtitle.sub);
/* Convert the subtitle's start time into DCP time and register it as
   active from that point. */
960 DCPTime from (content_time_to_dcp (piece, subtitle.from()));
962 _active_texts[subtitle.type()].add_from (wc, ps, from);
/* Handler for the start of a string (plain-text) subtitle: apply the
   content's offsets/scales to each SubtitleString and register it active. */
966 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
968 shared_ptr<Piece> piece = wp.lock ();
969 shared_ptr<const TextContent> text = wc.lock ();
970 if (!piece || !text) {
975 DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
/* Starts after the content's end: ignore it. */
977 if (from > piece->content->end()) {
981 BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
982 s.set_h_position (s.h_position() + text->x_offset ());
983 s.set_v_position (s.v_position() + text->y_offset ());
984 float const xs = text->x_scale();
985 float const ys = text->y_scale();
986 float size = s.size();
988 /* Adjust size to express the common part of the scaling;
989 e.g. if xs = ys = 0.5 we scale size by 2.
991 if (xs > 1e-5 && ys > 1e-5) {
992 size *= 1 / min (1 / xs, 1 / ys);
996 /* Then express aspect ratio changes */
997 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
998 s.set_aspect_adjust (xs / ys);
/* dcp::Time is constructed with seconds and an editable rate of 1000. */
1001 s.set_in (dcp::Time(from.seconds(), 1000));
1002 ps.string.push_back (StringText (s, text->outline_width()));
1003 ps.add_fonts (text->fonts ());
1006 _active_texts[subtitle.type()].add_from (wc, ps, from);
/* Handler for the end of a subtitle/caption: close it in the active-texts
   store and, if it is not being burnt into the image, emit it via the Text
   signal with its full DCP time period. */
1010 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to, TextType type)
1012 if (!_active_texts[type].have (wc)) {
1016 shared_ptr<Piece> piece = wp.lock ();
1017 shared_ptr<const TextContent> text = wc.lock ();
1018 if (!piece || !text) {
1022 DCPTime const dcp_to = content_time_to_dcp (piece, to);
/* Ends after the content's end: ignore it. */
1024 if (dcp_to > piece->content->end()) {
1028 pair<PlayerText, DCPTime> from = _active_texts[type].add_to (wc, dcp_to);
/* Burnt subtitles are composited onto the video instead of being emitted. */
1030 bool const always = (type == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1031 if (text->use() && !always && !text->burn()) {
1032 Text (from.first, type, DCPTimePeriod (from.second, dcp_to));
/* Seek the player to @param time.
   @param accurate true to seek precisely to `time'; false allows decoders to
   land on a nearby convenient point, in which case the emission clocks are
   left unset until data actually arrives. */
1037 Player::seek (DCPTime time, bool accurate)
1039 boost::mutex::scoped_lock lm (_mutex);
1042 /* We can't seek in this state */
/* Discard any pending 3D frames held by the shuffler. */
1047 _shuffler->clear ();
1052 if (_audio_processor) {
1053 _audio_processor->flush ();
/* Throw away buffered audio and any active subtitles; they refer to the
   old position. */
1056 _audio_merger.clear ();
1057 for (int i = 0; i < TEXT_COUNT; ++i) {
1058 _active_texts[i].clear ();
1061 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1062 if (time < i->content->position()) {
1063 /* Before; seek to the start of the content */
1064 i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
1066 } else if (i->content->position() <= time && time < i->content->end()) {
1067 /* During; seek to position */
1068 i->decoder->seek (dcp_to_content_time (i, time), accurate);
1071 /* After; this piece is done */
/* Accurate seek: we know exactly where the next video/audio must start.
   Inaccurate: leave the clocks unset so the next data defines them. */
1077 _last_video_time = time;
1078 _last_video_eyes = EYES_LEFT;
1079 _last_audio_time = time;
1081 _last_video_time = optional<DCPTime>();
1082 _last_video_eyes = optional<Eyes>();
1083 _last_audio_time = optional<DCPTime>();
/* Reposition the black/silence fillers and forget cached last frames. */
1086 _black.set_position (time);
1087 _silent.set_position (time);
1089 _last_video.clear ();
/* Queue a video frame for emission, delaying it slightly so that subtitles
   for the same time have a chance to arrive first; advances the video
   clock and flushes the oldest queued frame once the queue is long enough. */
1093 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1095 /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1096 player before the video that requires them.
1098 _delay.push_back (make_pair (pv, time));
/* The video clock only advances when a whole frame (both eyes) is complete. */
1100 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1101 _last_video_time = time + one_video_frame();
1103 _last_video_eyes = increment_eyes (pv->eyes());
/* Keep up to two frames in the delay queue before emitting the oldest. */
1105 if (_delay.size() < 3) {
1109 pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1111 do_emit_video (to_do.first, to_do.second);
/* Actually emit a video frame: drop expired active texts, burn any open
   subtitles for this time into the frame, then signal it out. */
1115 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1117 if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1118 for (int i = 0; i < TEXT_COUNT; ++i) {
1119 _active_texts[i].clear_before (time);
1123 optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1125 pv->set_text (subtitles.get ());
/* Emit a block of audio, which must follow on contiguously from the last
   block; advances the audio clock by its length. */
1132 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1134 /* Log if the assert below is about to fail */
1135 if (_last_audio_time && time != *_last_audio_time) {
1136 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1139 /* This audio must follow on from the previous */
1140 DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1142 _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
/* Emit silence to cover @param period, in chunks of at most half a second
   to bound buffer sizes.  No-op for an empty period. */
1146 Player::fill_audio (DCPTimePeriod period)
1148 if (period.from == period.to) {
1152 DCPOMATIC_ASSERT (period.from < period.to);
1154 DCPTime t = period.from;
1155 while (t < period.to) {
1156 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1157 Frame const samples = block.frames_round(_film->audio_frame_rate());
1159 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1160 silence->make_silent ();
1161 emit_audio (silence, t);
/* @return the duration of one video frame at the film's frame rate. */
1168 Player::one_video_frame () const
1170 return DCPTime::from_frames (1, _film->video_frame_rate ());
/* Drop the part of @param audio (starting at @param time) that falls before
   @param discard_to.
   @return the remaining samples and their new start time; a null buffer if
   everything was discarded. */
1173 pair<shared_ptr<AudioBuffers>, DCPTime>
1174 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1176 DCPTime const discard_time = discard_to - time;
1177 Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1178 Frame remaining_frames = audio->frames() - discard_frames;
1179 if (remaining_frames <= 0) {
1180 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1182 shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1183 cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1184 return make_pair(cut, time + discard_time);
/* Set (or clear, with none) the resolution reduction applied when decoding
   DCP content; rebuilds pieces and notifies listeners if it changed. */
1188 Player::set_dcp_decode_reduction (optional<int> reduction)
1191 boost::mutex::scoped_lock lm (_mutex);
1193 if (reduction == _dcp_decode_reduction) {
1197 _dcp_decode_reduction = reduction;
1198 setup_pieces_unlocked ();
/* Signalled outside the lock (lock released at scope end above). */
1201 Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);
/* Public overload: convert a ContentTime within @param content to DCP time.
   @return the DCP time, or none if the content is not currently in our
   piece list (e.g. mid-rebuild). */
1205 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1207 boost::mutex::scoped_lock lm (_mutex);
1209 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1210 if (i->content == content) {
1211 return content_time_to_dcp (i, t);
1215 /* We couldn't find this content; perhaps things are being changed over */
1216 return optional<DCPTime>();