Punt on transferring old decoder stuff.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "atmos_decoder.h"
22 #include "player.h"
23 #include "film.h"
24 #include "audio_buffers.h"
25 #include "content_audio.h"
26 #include "dcp_content.h"
27 #include "dcpomatic_log.h"
28 #include "job.h"
29 #include "image.h"
30 #include "raw_image_proxy.h"
31 #include "ratio.h"
32 #include "log.h"
33 #include "render_text.h"
34 #include "config.h"
35 #include "content_video.h"
36 #include "player_video.h"
37 #include "frame_rate_change.h"
38 #include "audio_processor.h"
39 #include "playlist.h"
40 #include "referenced_reel_asset.h"
41 #include "decoder_factory.h"
42 #include "decoder.h"
43 #include "video_decoder.h"
44 #include "audio_decoder.h"
45 #include "text_content.h"
46 #include "text_decoder.h"
47 #include "ffmpeg_content.h"
48 #include "audio_content.h"
49 #include "dcp_decoder.h"
50 #include "image_decoder.h"
51 #include "compose.hpp"
52 #include "shuffler.h"
53 #include "timer.h"
54 #include <dcp/reel.h>
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <boost/foreach.hpp>
60 #include <stdint.h>
61 #include <algorithm>
62 #include <iostream>
63
64 #include "i18n.h"
65
66 using std::list;
67 using std::cout;
68 using std::min;
69 using std::max;
70 using std::min;
71 using std::vector;
72 using std::pair;
73 using std::map;
74 using std::make_pair;
75 using std::copy;
76 using boost::shared_ptr;
77 using boost::weak_ptr;
78 using boost::dynamic_pointer_cast;
79 using boost::optional;
80 using boost::scoped_ptr;
81 #if BOOST_VERSION >= 106100
82 using namespace boost::placeholders;
83 #endif
84 using namespace dcpomatic;
85
/* Identifiers for the properties announced via the Player's Change signal,
   so listeners can tell which aspect of the player's output has changed.
   Values start at 700, presumably to avoid clashing with other property
   ranges elsewhere in the project -- not verifiable from this file. */
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
92
/** Construct a Player with no explicit playlist.
 *  @param film Film to play; with _playlist unset, setup_pieces_unlocked()
 *  falls back to the film's own length/content (see playlist() usage there).
 */
Player::Player (shared_ptr<const Film> film)
        : _film (film)
        , _suspended (0)
        , _ignore_video (false)
        , _ignore_audio (false)
        , _ignore_text (false)
        , _always_burn_open_subtitles (false)
        , _fast (false)
        , _tolerant (film->tolerant())
        , _play_referenced (false)
        , _audio_merger (_film->audio_frame_rate())
        , _shuffler (0)
{
        construct ();
}
108
/** Construct a Player for a specific playlist rather than the film's own.
 *  @param film Film supplying global parameters (audio rate, tolerance etc.).
 *  @param playlist_ Playlist whose content will be played.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
        : _film (film)
        , _playlist (playlist_)
        , _suspended (0)
        , _ignore_video (false)
        , _ignore_audio (false)
        , _ignore_text (false)
        , _always_burn_open_subtitles (false)
        , _fast (false)
        , _tolerant (film->tolerant())
        , _play_referenced (false)
        , _audio_merger (_film->audio_frame_rate())
        , _shuffler (0)
{
        construct ();
}
125
/** Common set-up shared by both constructors: connect to film/playlist
 *  change signals, set the initial container size and audio processor,
 *  build the pieces and seek to the start of the programme.
 */
void
Player::construct ()
{
        _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
        /* The butler must hear about this first, so since we are proxying this through to the butler we must
           be first.
        */
        _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
        _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
        set_video_container_size (_film->frame_size ());

        /* Pick up the film's audio processor as if it had just been configured */
        film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);

        setup_pieces ();
        seek (DCPTime (), true);
}
142
Player::~Player ()
{
        /* _shuffler is a raw owning pointer, re-created on every call to
           setup_pieces_unlocked(); it must be freed here. */
        delete _shuffler;
}
147
/** Rebuild the Piece list from the playlist, taking the state mutex first */
void
Player::setup_pieces ()
{
        boost::mutex::scoped_lock lm (_mutex);
        setup_pieces_unlocked ();
}
154
155
156 bool
157 have_video (shared_ptr<const Content> content)
158 {
159         return static_cast<bool>(content->video) && content->video->use();
160 }
161
162 bool
163 have_audio (shared_ptr<const Content> content)
164 {
165         return static_cast<bool>(content->audio);
166 }
167
/** Rebuild _pieces from the playlist: create a decoder for each piece of
 *  content, wire up the decoders' emission signals to our handlers and
 *  reset playback state.  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
        _playback_length = _playlist ? _playlist->length(_film) : _film->length();

        /* Keep the old pieces alive for the duration of this call (see the
           commented-out decoder-reuse code below). */
        list<shared_ptr<Piece> > old_pieces = _pieces;
        _pieces.clear ();

        delete _shuffler;
        _shuffler = new Shuffler();
        _shuffler->Video.connect(bind(&Player::video, this, _1, _2));

        BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {

                if (!i->paths_valid ()) {
                        continue;
                }

                if (_ignore_video && _ignore_audio && i->text.empty()) {
                        /* We're only interested in text and this content has none */
                        continue;
                }

                shared_ptr<Decoder> old_decoder;
                /* XXX: needs to check vector of Content and use the old decoders, but
                 * this will all be different as we have to coalesce content before
                 * this happens.
                BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
                        if (j->content == i) {
                                old_decoder = j->decoder;
                                break;
                        }
                }
                */

                shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
                DCPOMATIC_ASSERT (decoder);

                FrameRateChange frc (_film, i);

                if (decoder->video && _ignore_video) {
                        decoder->video->set_ignore (true);
                }

                if (decoder->audio && _ignore_audio) {
                        decoder->audio->set_ignore (true);
                }

                if (_ignore_text) {
                        /* NB: this inner `i' shadows the content `i' above */
                        BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
                                i->set_ignore (true);
                        }
                }

                shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
                if (dcp) {
                        dcp->set_decode_referenced (_play_referenced);
                        if (_play_referenced) {
                                dcp->set_forced_reduction (_dcp_decode_reduction);
                        }
                }

                shared_ptr<Piece> piece (new Piece (i, decoder, frc));
                _pieces.push_back (piece);

                if (decoder->video) {
                        if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
                                /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
                                decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
                        } else {
                                decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
                        }
                }

                if (decoder->audio) {
                        decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
                }

                /* Connect text start/stop signals for every text decoder that
                   this content's decoder provides. */
                list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

                while (j != decoder->text.end()) {
                        (*j)->BitmapStart.connect (
                                bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
                                );
                        (*j)->PlainStart.connect (
                                bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
                                );
                        (*j)->Stop.connect (
                                bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
                                );

                        ++j;
                }

                if (decoder->atmos) {
                        decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
                }
        }

        /* Record, per audio stream, which piece it belongs to and where that
           piece starts; used by pass() to work out how much audio is ready. */
        _stream_states.clear ();
        BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
                if (i->content->audio) {
                        BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
                                _stream_states[j] = StreamState (i, i->content->position ());
                        }
                }
        }

        /* Periods with no video get black; periods with no audio get silence */
        _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
        _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

        _last_video_time = DCPTime ();
        _last_video_eyes = EYES_BOTH;
        _last_audio_time = DCPTime ();
}
283
/** Handler for ContentChange signals from the playlist.
 *  @param type Whether the change is pending, done or cancelled.
 *  @param property Which content property changed.
 *  @param frequent True if this change is likely to recur rapidly.
 *
 *  Crop changes just need existing queued video frames re-stamped; any other
 *  change suspends the player (blocking pass()/seek()) until it completes.
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
        if (property == VideoContentProperty::CROP) {
                if (type == CHANGE_TYPE_DONE) {
                        dcp::Size const vcs = video_container_size();
                        boost::mutex::scoped_lock lm (_mutex);
                        /* Update the metadata on any video frames we are holding in _delay */
                        for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
                                i->first->reset_metadata (_film, vcs);
                        }
                }
        } else {
                if (type == CHANGE_TYPE_PENDING) {
                        /* The player content is probably about to change, so we can't carry on
                           until that has happened and we've rebuilt our pieces.  Stop pass()
                           and seek() from working until then.
                        */
                        ++_suspended;
                } else if (type == CHANGE_TYPE_DONE) {
                        /* A change in our content has gone through.  Re-build our pieces. */
                        setup_pieces ();
                        --_suspended;
                } else if (type == CHANGE_TYPE_CANCELLED) {
                        --_suspended;
                }
        }

        /* Proxy the change through to our own listeners (e.g. the butler) */
        Change (type, property, frequent);
}
313
/** Set the size of the container into which video will be scaled, rebuilding
 *  the cached black frame to match.  Emits Change (PENDING then DONE, or
 *  CANCELLED if the size is unchanged).
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
        Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

        {
                boost::mutex::scoped_lock lm (_mutex);

                if (s == _video_container_size) {
                        /* Unlock before emitting so listeners can safely call back into us */
                        lm.unlock ();
                        Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
                        return;
                }

                _video_container_size = s;

                _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
                _black_image->make_black ();
        }

        Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
336
337 void
338 Player::playlist_change (ChangeType type)
339 {
340         if (type == CHANGE_TYPE_DONE) {
341                 setup_pieces ();
342         }
343         Change (type, PlayerProperty::PLAYLIST, false);
344 }
345
346 void
347 Player::film_change (ChangeType type, Film::Property p)
348 {
349         /* Here we should notice Film properties that affect our output, and
350            alert listeners that our output now would be different to how it was
351            last time we were run.
352         */
353
354         if (p == Film::CONTAINER) {
355                 Change (type, PlayerProperty::FILM_CONTAINER, false);
356         } else if (p == Film::VIDEO_FRAME_RATE) {
357                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
358                    so we need new pieces here.
359                 */
360                 if (type == CHANGE_TYPE_DONE) {
361                         setup_pieces ();
362                 }
363                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
364         } else if (p == Film::AUDIO_PROCESSOR) {
365                 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
366                         boost::mutex::scoped_lock lm (_mutex);
367                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
368                 }
369         } else if (p == Film::AUDIO_CHANNELS) {
370                 if (type == CHANGE_TYPE_DONE) {
371                         boost::mutex::scoped_lock lm (_mutex);
372                         _audio_merger.clear ();
373                 }
374         }
375 }
376
377 shared_ptr<PlayerVideo>
378 Player::black_player_video_frame (Eyes eyes) const
379 {
380         return shared_ptr<PlayerVideo> (
381                 new PlayerVideo (
382                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
383                         Crop (),
384                         optional<double> (),
385                         _video_container_size,
386                         _video_container_size,
387                         eyes,
388                         PART_WHOLE,
389                         PresetColourConversion::all().front().conversion,
390                         VIDEO_RANGE_FULL,
391                         boost::weak_ptr<Content>(),
392                         boost::optional<Frame>(),
393                         false
394                 )
395         );
396 }
397
/** Convert a DCP time to a frame index within a piece of video content.
 *  @param piece Piece containing the content.
 *  @param t Time in the DCP.
 *  @return Frame index in the content, clamped to the trimmed range.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
        DCPTime s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(_film), s);
        s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

        /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
           then convert that ContentTime to frames at the content's rate.  However this fails for
           situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
           enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

           Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
        */
        return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
414
415 DCPTime
416 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
417 {
418         /* See comment in dcp_to_content_video */
419         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
420         return d + piece->content->position();
421 }
422
/** Convert a DCP time to an audio frame index at the film's audio rate.
 *  @param piece Piece containing the content.
 *  @param t Time in the DCP.
 *  @return Audio frame index, clamped to the trimmed range.
 */
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
        DCPTime s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(_film), s);
        /* See notes in dcp_to_content_video */
        return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
}
431
432 DCPTime
433 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
434 {
435         /* See comment in dcp_to_content_video */
436         return DCPTime::from_frames (f, _film->audio_frame_rate())
437                 - DCPTime (piece->content->trim_start(), piece->frc)
438                 + piece->content->position();
439 }
440
441 ContentTime
442 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
443 {
444         DCPTime s = t - piece->content->position ();
445         s = min (piece->content->length_after_trim(_film), s);
446         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
447 }
448
449 DCPTime
450 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
451 {
452         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
453 }
454
455 list<shared_ptr<Font> >
456 Player::get_subtitle_fonts ()
457 {
458         boost::mutex::scoped_lock lm (_mutex);
459
460         list<shared_ptr<Font> > fonts;
461         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
462                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
463                         /* XXX: things may go wrong if there are duplicate font IDs
464                            with different font files.
465                         */
466                         list<shared_ptr<Font> > f = j->fonts ();
467                         copy (f.begin(), f.end(), back_inserter (fonts));
468                 }
469         }
470
471         return fonts;
472 }
473
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
        /* Pieces must be rebuilt so the decoders pick up the ignore flag */
        boost::mutex::scoped_lock lm (_mutex);
        _ignore_video = true;
        setup_pieces_unlocked ();
}
482
/** Set this player never to produce any audio data */
void
Player::set_ignore_audio ()
{
        /* Pieces must be rebuilt so the decoders pick up the ignore flag */
        boost::mutex::scoped_lock lm (_mutex);
        _ignore_audio = true;
        setup_pieces_unlocked ();
}
490
/** Set this player never to produce any text data */
void
Player::set_ignore_text ()
{
        /* Pieces must be rebuilt so the text decoders pick up the ignore flag */
        boost::mutex::scoped_lock lm (_mutex);
        _ignore_text = true;
        setup_pieces_unlocked ();
}
498
/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
        /* No rebuild needed: the flag is consulted at render time
           (see open_subtitles_for_frame). */
        boost::mutex::scoped_lock lm (_mutex);
        _always_burn_open_subtitles = true;
}
506
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
        /* Pieces must be rebuilt as _fast is passed to decoder_factory() */
        boost::mutex::scoped_lock lm (_mutex);
        _fast = true;
        setup_pieces_unlocked ();
}
515
/** Set the player to decode content from referenced DCPs rather than
 *  skipping it (see the _play_referenced handling in setup_pieces_unlocked
 *  and pass).
 */
void
Player::set_play_referenced ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _play_referenced = true;
        setup_pieces_unlocked ();
}
523
/** Apply per-reel trims to a reel asset and, if any duration remains,
 *  add it to a list of referenced assets.
 *  @param a List to add to.
 *  @param r Asset to trim (modified in place: entry point and duration are adjusted).
 *  @param reel_trim_start Frames to trim from the start of this reel.
 *  @param reel_trim_end Frames to trim from the end of this reel.
 *  @param from Time in the DCP at which this asset starts.
 *  @param ffr Film video frame rate.
 */
static void
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
{
        DCPOMATIC_ASSERT (r);
        r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
        r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
        if (r->actual_duration() > 0) {
                a.push_back (
                        ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
                        );
        }
}
536
/** @return Assets from DCP content in the playlist which are marked to be
 *  referenced (rather than re-encoded), with reel-level trims applied.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
        /* Does not require a lock on _mutex as it's only called from DCPEncoder */

        list<ReferencedReelAsset> a;

        BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
                shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
                if (!j) {
                        continue;
                }

                scoped_ptr<DCPDecoder> decoder;
                try {
                        decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
                } catch (...) {
                        /* NOTE(review): a decoder failure abandons the whole scan
                           and returns what we have so far, rather than skipping
                           just this content (`continue`) -- presumably deliberate
                           best-effort behaviour; confirm. */
                        return a;
                }

                DCPOMATIC_ASSERT (j->video_frame_rate ());
                double const cfr = j->video_frame_rate().get();
                Frame const trim_start = j->trim_start().frames_round (cfr);
                Frame const trim_end = j->trim_end().frames_round (cfr);
                int const ffr = _film->video_frame_rate ();

                /* position in the asset from the start */
                int64_t offset_from_start = 0;
                /* position in the asset from the end */
                int64_t offset_from_end = 0;
                BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
                        /* Assume that main picture duration is the length of the reel */
                        offset_from_end += k->main_picture()->actual_duration();
                }

                BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

                        /* Assume that main picture duration is the length of the reel */
                        int64_t const reel_duration = k->main_picture()->actual_duration();

                        /* See doc/design/trim_reels.svg */
                        Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
                        Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

                        DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
                        if (j->reference_video ()) {
                                maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
                        }

                        if (j->reference_audio ()) {
                                maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
                        }

                        if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
                                maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
                        }

                        if (j->reference_text (TEXT_CLOSED_CAPTION)) {
                                BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
                                        maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
                                }
                        }

                        offset_from_start += reel_duration;
                        offset_from_end -= reel_duration;
                }
        }

        return a;
}
607
/** Make the most-behind decoder (or gap-filler) emit some more data, then
 *  flush out any audio that is now complete.
 *  @return true when playback has finished (nothing left to emit).
 */
bool
Player::pass ()
{
        boost::mutex::scoped_lock lm (_mutex);

        if (_suspended) {
                /* We can't pass in this state */
                LOG_DEBUG_PLAYER_NC ("Player is suspended");
                return false;
        }

        if (_playback_length == DCPTime()) {
                /* Special; just give one black frame */
                emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
                return true;
        }

        /* Find the decoder or empty which is farthest behind where we are and make it emit some data */

        shared_ptr<Piece> earliest_content;
        optional<DCPTime> earliest_time;

        BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
                if (i->done) {
                        continue;
                }

                DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
                if (t > i->content->end(_film)) {
                        i->done = true;
                } else {

                        /* Given two choices at the same time, pick the one with texts so we see it before
                           the video.
                        */
                        if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
                                earliest_time = t;
                                earliest_content = i;
                        }
                }
        }

        bool done = false;

        /* What kind of thing will emit data on this pass */
        enum {
                NONE,
                CONTENT,
                BLACK,
                SILENT
        } which = NONE;

        if (earliest_content) {
                which = CONTENT;
        }

        /* A gap with no video that starts before the earliest content takes priority... */
        if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
                earliest_time = _black.position ();
                which = BLACK;
        }

        /* ...as does an earlier gap with no audio */
        if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
                earliest_time = _silent.position ();
                which = SILENT;
        }

        switch (which) {
        case CONTENT:
        {
                LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
                earliest_content->done = earliest_content->decoder->pass ();
                shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
                if (dcp && !_play_referenced && dcp->reference_audio()) {
                        /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
                           to `hide' the fact that no audio was emitted during the referenced DCP (though
                           we need to behave as though it was).
                        */
                        _last_audio_time = dcp->end (_film);
                }
                break;
        }
        case BLACK:
                LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
                emit_video (black_player_video_frame(EYES_BOTH), _black.position());
                _black.set_position (_black.position() + one_video_frame());
                break;
        case SILENT:
        {
                LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
                DCPTimePeriod period (_silent.period_at_position());
                if (_last_audio_time) {
                        /* Sometimes the thing that happened last finishes fractionally before
                           or after this silence.  Bodge the start time of the silence to fix it.
                           I think this is nothing to worry about since we will just add or
                           remove a little silence at the end of some content.
                        */
                        int64_t const error = labs(period.from.get() - _last_audio_time->get());
                        /* Let's not worry about less than a frame at 24fps */
                        int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
                        if (error >= too_much_error) {
                                _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
                        }
                        DCPOMATIC_ASSERT (error < too_much_error);
                        period.from = *_last_audio_time;
                }
                /* Emit at most one video frame's worth of silence per pass */
                if (period.duration() > one_video_frame()) {
                        period.to = period.from + one_video_frame();
                }
                fill_audio (period);
                _silent.set_position (period.to);
                break;
        }
        case NONE:
                done = true;
                break;
        }

        /* Emit any audio that is ready */

        /* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
           of our streams, or the position of the _silent.
        */
        DCPTime pull_to = _playback_length;
        for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
                if (!i->second.piece->done && i->second.last_push_end < pull_to) {
                        pull_to = i->second.last_push_end;
                }
        }
        if (!_silent.done() && _silent.position() < pull_to) {
                pull_to = _silent.position();
        }

        LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
        list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
        for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
                if (_last_audio_time && i->second < *_last_audio_time) {
                        /* This new data comes before the last we emitted (or the last seek); discard it */
                        pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
                        if (!cut.first) {
                                continue;
                        }
                        *i = cut;
                } else if (_last_audio_time && i->second > *_last_audio_time) {
                        /* There's a gap between this data and the last we emitted; fill with silence */
                        fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
                }

                emit_audio (i->first, i->second);
        }

        if (done) {
                /* Flush anything the shuffler is holding, then any delayed video */
                _shuffler->flush ();
                for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
                        do_emit_video(i->first, i->second);
                }
        }

        return done;
}
766
/** @return Open subtitles for the frame at the given time, converted to images.
 *  @param time Time of the frame in the DCP.
 *
 *  Collects both bitmap and rendered string subtitles active during the frame
 *  and merges them into a single positioned image; returns none if there are
 *  no subtitles to burn.
 */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
        list<PositionImage> captions;
        int const vfr = _film->video_frame_rate();

        BOOST_FOREACH (
                PlayerText j,
                _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
                ) {

                /* Bitmap subtitles */
                BOOST_FOREACH (BitmapText i, j.bitmap) {
                        if (!i.image) {
                                continue;
                        }

                        /* i.image will already have been scaled to fit _video_container_size */
                        dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

                        captions.push_back (
                                PositionImage (
                                        i.image,
                                        Position<int> (
                                                lrint (_video_container_size.width * i.rectangle.x),
                                                lrint (_video_container_size.height * i.rectangle.y)
                                                )
                                        )
                                );
                }

                /* String subtitles (rendered to an image) */
                if (!j.string.empty ()) {
                        list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
                        copy (s.begin(), s.end(), back_inserter (captions));
                }
        }

        if (captions.empty ()) {
                return optional<PositionImage> ();
        }

        return merge (captions);
}
812
/** Handle a frame of video arriving from a decoder.
 *  Fills any gap since the last video we emitted (repeating the previous frame
 *  where we have one, otherwise using black), then emits this frame, repeating
 *  it if the frame rate change requires.
 *  @param wp Piece that the video came from.
 *  @param video The video frame itself.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The content has gone; nothing to do */
		return;
	}

	if (!piece->content->video->use()) {
		return;
	}

	/* If the rate conversion means dropping every other frame, discard odd ones */
	FrameRateChange frc (_film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* The last frame we emitted for this piece, if any, to repeat as filler */
			LastVideoMap::const_iterator last = _last_video.find (wp);
			if (_film->three_d()) {
				/* In 3D we must fill eye-by-eye, stopping just before the eye
				   of the incoming frame (or LEFT at the end of the content).
				*/
				Eyes fill_to_eyes = video.eyes;
				if (fill_to_eyes == EYES_BOTH) {
					fill_to_eyes = EYES_LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = EYES_LEFT;
				}
				DCPTime j = fill_from;
				Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
				if (eyes == EYES_BOTH) {
					eyes = EYES_LEFT;
				}
				/* Emit alternating-eye filler frames until we reach both the
				   target time and the target eye.
				*/
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					/* Time only advances after the RIGHT eye of a pair */
					if (eyes == EYES_RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: one filler frame per video frame period */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(EYES_BOTH), j);
					}
				}
			}
		}
	}

	/* Remember this frame so it can be repeated as filler later */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (_film, video.frame),
			scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content->video->range(),
			piece->content,
			video.frame,
			false
			)
		);

	/* Emit the frame, repeated as many times as the rate change requires,
	   but never past the end of the content.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
919
/** Handle a block of audio arriving from a decoder.
 *  Trims anything outside the content's period, applies gain, remaps channels,
 *  runs any audio processor and pushes the result into the merger.
 *  @param wp Piece that the audio came from.
 *  @param stream Stream within the piece's content.
 *  @param content_audio The audio data and its frame index.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The content has gone; nothing to do */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* The block straddles the end of the content; keep only the part before it */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy before applying gain so we don't modify the decoder's buffers */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	/* Record how far this stream has got, so the merger knows what it can safely emit */
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
988
989 void
990 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
991 {
992         shared_ptr<Piece> piece = wp.lock ();
993         shared_ptr<const TextContent> text = wc.lock ();
994         if (!piece || !text) {
995                 return;
996         }
997
998         /* Apply content's subtitle offsets */
999         subtitle.sub.rectangle.x += text->x_offset ();
1000         subtitle.sub.rectangle.y += text->y_offset ();
1001
1002         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
1003         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
1004         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
1005
1006         /* Apply content's subtitle scale */
1007         subtitle.sub.rectangle.width *= text->x_scale ();
1008         subtitle.sub.rectangle.height *= text->y_scale ();
1009
1010         PlayerText ps;
1011         shared_ptr<Image> image = subtitle.sub.image;
1012
1013         /* We will scale the subtitle up to fit _video_container_size */
1014         int const width = subtitle.sub.rectangle.width * _video_container_size.width;
1015         int const height = subtitle.sub.rectangle.height * _video_container_size.height;
1016         if (width == 0 || height == 0) {
1017                 return;
1018         }
1019
1020         dcp::Size scaled_size (width, height);
1021         ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
1022         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
1023
1024         _active_texts[text->type()].add_from (wc, ps, from);
1025 }
1026
/** Handle the start of a string subtitle arriving from a decoder: apply the
 *  content's position offsets, size scale and aspect adjustment, then add the
 *  result to the set of active texts.
 *  @param wp Piece that the subtitle came from.
 *  @param wc TextContent within the piece.
 *  @param subtitle The string subtitle itself.
 */
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
		/* Content has gone away; nothing to do */
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	if (from > piece->content->end(_film)) {
		/* Starts after the content finishes; ignore it */
		return;
	}

	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Scale the size by the larger of the two scale factors;
		   1 / min(1/xs, 1/ys) == max(xs, ys).  The remaining difference
		   between xs and ys is expressed with aspect_adjust below.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[text->type()].add_from (wc, ps, from);
}
1070
/** Handle the end of a subtitle arriving from a decoder: close the matching
 *  open subtitle in the active-texts list and, if it is not being burnt in,
 *  emit it as discrete text via the Text signal.
 *  @param wp Piece that the subtitle came from.
 *  @param wc TextContent within the piece.
 *  @param to Time (in the content) at which the subtitle finishes.
 */
void
Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
{
	shared_ptr<const TextContent> text = wc.lock ();
	if (!text) {
		return;
	}

	if (!_active_texts[text->type()].have(wc)) {
		/* No matching subtitle start was seen; nothing to stop */
		return;
	}

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	DCPTime const dcp_to = content_time_to_dcp (piece, to);

	if (dcp_to > piece->content->end(_film)) {
		/* Finishes after the content does; ignore it */
		return;
	}

	pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);

	/* Emit as discrete text only when it is in use and not burnt in
	   (either explicitly or because open subtitles are always burnt).
	*/
	bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
	if (text->use() && !always && !text->burn()) {
		Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
	}
}
1101
/** Seek the player to a given time.
 *  Clears all buffered state (shuffler, delay queue, audio merger, active
 *  texts), seeks each piece's decoder, and resets the emission clocks.
 *  @param time DCP time to seek to.
 *  @param accurate true to try to return exactly the frame at @param time;
 *  false to accept the nearest convenient position (e.g. a keyframe).
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	if (_shuffler) {
		_shuffler->clear ();
	}

	/* Drop any video waiting in the subtitle-delay queue */
	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();
	}

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* We know exactly where the next video/audio will come from */
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	} else {
		/* We don't know exactly what will arrive next, so don't constrain it */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget the frames we were keeping for gap-filling */
	_last_video.clear ();
}
1162
1163 void
1164 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1165 {
1166         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1167            player before the video that requires them.
1168         */
1169         _delay.push_back (make_pair (pv, time));
1170
1171         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1172                 _last_video_time = time + one_video_frame();
1173         }
1174         _last_video_eyes = increment_eyes (pv->eyes());
1175
1176         if (_delay.size() < 3) {
1177                 return;
1178         }
1179
1180         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1181         _delay.pop_front();
1182         do_emit_video (to_do.first, to_do.second);
1183 }
1184
1185 void
1186 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1187 {
1188         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1189                 for (int i = 0; i < TEXT_COUNT; ++i) {
1190                         _active_texts[i].clear_before (time);
1191                 }
1192         }
1193
1194         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1195         if (subtitles) {
1196                 pv->set_text (subtitles.get ());
1197         }
1198
1199         Video (pv, time);
1200 }
1201
/** Pass some audio on to anybody who is listening, checking that it follows on
 *  contiguously from the last audio we emitted, and advancing the audio clock.
 *  @param data Audio to emit.
 *  @param time Time of the start of the audio within the DCP.
 */
void
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
{
	/* Log if the assert below is about to fail */
	if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
		_film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
	}

	/* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
	/* NOTE(review): labs() takes long, which is 32-bit on LLP64 platforms (e.g. Windows);
	   if DCPTime::get() is 64-bit the difference could be truncated here — confirm.
	*/
	DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
	Audio (data, time, _film->audio_frame_rate());
	_last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
}
1215
1216 void
1217 Player::fill_audio (DCPTimePeriod period)
1218 {
1219         if (period.from == period.to) {
1220                 return;
1221         }
1222
1223         DCPOMATIC_ASSERT (period.from < period.to);
1224
1225         DCPTime t = period.from;
1226         while (t < period.to) {
1227                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1228                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1229                 if (samples) {
1230                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1231                         silence->make_silent ();
1232                         emit_audio (silence, t);
1233                 }
1234                 t += block;
1235         }
1236 }
1237
1238 DCPTime
1239 Player::one_video_frame () const
1240 {
1241         return DCPTime::from_frames (1, _film->video_frame_rate ());
1242 }
1243
1244 pair<shared_ptr<AudioBuffers>, DCPTime>
1245 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1246 {
1247         DCPTime const discard_time = discard_to - time;
1248         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1249         Frame remaining_frames = audio->frames() - discard_frames;
1250         if (remaining_frames <= 0) {
1251                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1252         }
1253         shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1254         return make_pair(cut, time + discard_time);
1255 }
1256
/** Set the decode-resolution reduction to use for DCP content, rebuilding the
 *  pieces if it changes.  Emits Change PENDING before the change and either
 *  DONE or CANCELLED (if the value is unchanged) afterwards.
 *  @param reduction Number of halvings of resolution, or none for full size.
 */
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change; unlock before emitting so observers can call back in */
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		setup_pieces_unlocked ();
	}

	/* Emitted with the lock released, as above */
	Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1277
1278 optional<DCPTime>
1279 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1280 {
1281         boost::mutex::scoped_lock lm (_mutex);
1282
1283         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1284                 if (i->content == content) {
1285                         return content_time_to_dcp (i, t);
1286                 }
1287         }
1288
1289         /* We couldn't find this content; perhaps things are being changed over */
1290         return optional<DCPTime>();
1291 }
1292
1293
1294 shared_ptr<const Playlist>
1295 Player::playlist () const
1296 {
1297         return _playlist ? _playlist : _film->playlist();
1298 }
1299
1300
/** Handle a frame of Atmos data arriving from a decoder: forward it via the
 *  Atmos signal, converting its frame index to a DCP time at the film's video
 *  frame rate.
 */
void
Player::atmos (weak_ptr<Piece>, ContentAtmos data)
{
	Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
}
1306