Nicer fix for 86102d30bf0aad89115bbeb3d8aaa2a27a0aa432
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_text.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
50 #include "shuffler.h"
51 #include "timer.h"
52 #include <dcp/reel.h>
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <dcp/reel_closed_caption_asset.h>
57 #include <boost/foreach.hpp>
58 #include <stdint.h>
59 #include <algorithm>
60 #include <iostream>
61
62 #include "i18n.h"
63
64 using std::list;
65 using std::cout;
66 using std::min;
67 using std::max;
68 using std::min;
69 using std::vector;
70 using std::pair;
71 using std::map;
72 using std::make_pair;
73 using std::copy;
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79 using namespace dcpomatic;
80
/* Property codes passed to the Change signal so that listeners can tell
   what aspect of the Player's output has changed.  Numbered from 700,
   presumably to keep them distinct from other property code ranges in
   the project — TODO confirm. */
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
87
/** Construct a Player which plays the whole of the given film.
 *  @param film Film to play.
 */
Player::Player (shared_ptr<const Film> film)
	: _film (film)
	, _suspended (0)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _tolerant (film->tolerant())
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	construct ();
}
103
/** Construct a Player which plays a given playlist rather than the film's own.
 *  @param film Film whose settings (frame rate etc.) should be used.
 *  @param playlist_ Playlist to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _tolerant (film->tolerant())
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	construct ();
}
120
/** Second-stage construction shared by both constructors: connect signals,
 *  set up initial state and seek to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor (if any) via the normal change path */
	film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}
137
Player::~Player ()
{
	/* _shuffler is a raw owning pointer allocated in setup_pieces_unlocked() */
	delete _shuffler;
}
142
/** Take _mutex and rebuild our list of Pieces */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
149
150
151 bool
152 have_video (shared_ptr<const Content> content)
153 {
154         return static_cast<bool>(content->video);
155 }
156
157 bool
158 have_audio (shared_ptr<const Content> content)
159 {
160         return static_cast<bool>(content->audio);
161 }
162
163 void
164 Player::setup_pieces_unlocked ()
165 {
166         _playback_length = _playlist ? _playlist->length(_film) : _film->length();
167
168         list<shared_ptr<Piece> > old_pieces = _pieces;
169         _pieces.clear ();
170
171         delete _shuffler;
172         _shuffler = new Shuffler();
173         _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
174
175         BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
176
177                 if (!i->paths_valid ()) {
178                         continue;
179                 }
180
181                 if (_ignore_video && _ignore_audio && i->text.empty()) {
182                         /* We're only interested in text and this content has none */
183                         continue;
184                 }
185
186                 shared_ptr<Decoder> old_decoder;
187                 BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
188                         if (j->content == i) {
189                                 old_decoder = j->decoder;
190                                 break;
191                         }
192                 }
193
194                 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
195                 FrameRateChange frc (_film, i);
196
197                 if (!decoder) {
198                         /* Not something that we can decode; e.g. Atmos content */
199                         continue;
200                 }
201
202                 if (decoder->video && _ignore_video) {
203                         decoder->video->set_ignore (true);
204                 }
205
206                 if (decoder->audio && _ignore_audio) {
207                         decoder->audio->set_ignore (true);
208                 }
209
210                 if (_ignore_text) {
211                         BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
212                                 i->set_ignore (true);
213                         }
214                 }
215
216                 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
217                 if (dcp) {
218                         dcp->set_decode_referenced (_play_referenced);
219                         if (_play_referenced) {
220                                 dcp->set_forced_reduction (_dcp_decode_reduction);
221                         }
222                 }
223
224                 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
225                 _pieces.push_back (piece);
226
227                 if (decoder->video) {
228                         if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
229                                 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
230                                 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
231                         } else {
232                                 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
233                         }
234                 }
235
236                 if (decoder->audio) {
237                         decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
238                 }
239
240                 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
241
242                 while (j != decoder->text.end()) {
243                         (*j)->BitmapStart.connect (
244                                 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
245                                 );
246                         (*j)->PlainStart.connect (
247                                 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
248                                 );
249                         (*j)->Stop.connect (
250                                 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
251                                 );
252
253                         ++j;
254                 }
255         }
256
257         _stream_states.clear ();
258         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
259                 if (i->content->audio) {
260                         BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
261                                 _stream_states[j] = StreamState (i, i->content->position ());
262                         }
263                 }
264         }
265
266         _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
267         _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
268
269         _last_video_time = DCPTime ();
270         _last_video_eyes = EYES_BOTH;
271         _last_audio_time = DCPTime ();
272 }
273
/** Called (via construct()'s connection) when some content in the playlist changes.
 *  @param type Stage of the change (pending / done / cancelled).
 *  @param property Code of the property that changed.
 *  @param frequent true if this is one of a rapid series of changes.
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (type == CHANGE_TYPE_PENDING) {
		/* The player content is probably about to change, so we can't carry on
		   until that has happened and we've rebuilt our pieces.  Stop pass()
		   and seek() from working until then.
		*/
		++_suspended;
	} else if (type == CHANGE_TYPE_DONE) {
		/* A change in our content has gone through.  Re-build our pieces. */
		setup_pieces ();
		--_suspended;
	} else if (type == CHANGE_TYPE_CANCELLED) {
		--_suspended;
	}

	/* Proxy the change on to our own listeners */
	Change (type, property, frequent);
}
293
/** Set the size of the container that output video frames will be scaled into.
 *  Emits Change with PENDING first, then either DONE (size changed) or
 *  CANCELLED (size was already s).
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No change: release the lock before emitting the signal */
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Re-make the black frame used to fill video gaps at the new size */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
316
317 void
318 Player::playlist_change (ChangeType type)
319 {
320         if (type == CHANGE_TYPE_DONE) {
321                 setup_pieces ();
322         }
323         Change (type, PlayerProperty::PLAYLIST, false);
324 }
325
326 void
327 Player::film_change (ChangeType type, Film::Property p)
328 {
329         /* Here we should notice Film properties that affect our output, and
330            alert listeners that our output now would be different to how it was
331            last time we were run.
332         */
333
334         if (p == Film::CONTAINER) {
335                 Change (type, PlayerProperty::FILM_CONTAINER, false);
336         } else if (p == Film::VIDEO_FRAME_RATE) {
337                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
338                    so we need new pieces here.
339                 */
340                 if (type == CHANGE_TYPE_DONE) {
341                         setup_pieces ();
342                 }
343                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
344         } else if (p == Film::AUDIO_PROCESSOR) {
345                 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
346                         boost::mutex::scoped_lock lm (_mutex);
347                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
348                 }
349         } else if (p == Film::AUDIO_CHANNELS) {
350                 if (type == CHANGE_TYPE_DONE) {
351                         boost::mutex::scoped_lock lm (_mutex);
352                         _audio_merger.clear ();
353                 }
354         }
355 }
356
/** @param eyes Eyes that the frame should be marked as being for.
 *  @return A completely black frame at the current video container size,
 *  used to fill periods where there is no video content.
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return shared_ptr<PlayerVideo> (
		new PlayerVideo (
			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
			Crop (),
			optional<double> (),
			_video_container_size,
			_video_container_size,
			eyes,
			PART_WHOLE,
			PresetColourConversion::all().front().conversion,
			VIDEO_RANGE_FULL,
			boost::weak_ptr<Content>(),
			boost::optional<Frame>(),
			false
		)
	);
}
377
/** @param piece Piece to look at.
 *  @param t DCP time.
 *  @return Video frame index within piece's content corresponding to t,
 *  clamped to the content's trimmed extent.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
394
395 DCPTime
396 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
397 {
398         /* See comment in dcp_to_content_video */
399         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
400         return d + piece->content->position();
401 }
402
/** @param piece Piece to look at.
 *  @param t DCP time.
 *  @return Audio frame index, at the film's audio rate, within piece's content
 *  corresponding to t.
 */
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
}
411
412 DCPTime
413 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
414 {
415         /* See comment in dcp_to_content_video */
416         return DCPTime::from_frames (f, _film->audio_frame_rate())
417                 - DCPTime (piece->content->trim_start(), piece->frc)
418                 + piece->content->position();
419 }
420
/** @param piece Piece to look at.
 *  @param t DCP time.
 *  @return Time within piece's content corresponding to t, clamped to the
 *  content's trimmed extent and never negative.
 */
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
428
429 DCPTime
430 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
431 {
432         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
433 }
434
435 list<shared_ptr<Font> >
436 Player::get_subtitle_fonts ()
437 {
438         boost::mutex::scoped_lock lm (_mutex);
439
440         list<shared_ptr<Font> > fonts;
441         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
442                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
443                         /* XXX: things may go wrong if there are duplicate font IDs
444                            with different font files.
445                         */
446                         list<shared_ptr<Font> > f = j->fonts ();
447                         copy (f.begin(), f.end(), back_inserter (fonts));
448                 }
449         }
450
451         return fonts;
452 }
453
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_video = true;
	/* Rebuild pieces so that the new flag is applied to decoders */
	setup_pieces_unlocked ();
}
462
/** Set this player never to produce any audio data */
void
Player::set_ignore_audio ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_audio = true;
	/* Rebuild pieces so that the new flag is applied to decoders */
	setup_pieces_unlocked ();
}
470
/** Set this player never to produce any text (subtitle / caption) data */
void
Player::set_ignore_text ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_text = true;
	/* Rebuild pieces so that the new flag is applied to decoders */
	setup_pieces_unlocked ();
}
478
/** Set the player to always burn open texts into the image regardless of the content settings.
 *  Note that, unlike the other set_* methods, this does not rebuild our pieces.
 */
void
Player::set_always_burn_open_subtitles ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_always_burn_open_subtitles = true;
}
486
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_fast = true;
	/* Rebuild pieces so that decoders are created with the fast flag */
	setup_pieces_unlocked ();
}
495
/** Set the player to decode referenced DCP content rather than skipping it */
void
Player::set_play_referenced ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	/* Rebuild pieces so that DCP decoders pick up the new setting */
	setup_pieces_unlocked ();
}
503
/** Trim a reel asset and add it to a list if anything of it remains.
 *  Note that the asset's entry point and duration are modified in place.
 *  @param a List to add to.
 *  @param r Asset to trim; must not be null (asserted).
 *  @param reel_trim_start Frames to trim from the start of the asset.
 *  @param reel_trim_end Frames to trim from the end of the asset.
 *  @param from Time of the asset within the DCP.
 *  @param ffr Film video frame rate, used to convert the asset's duration to a DCPTime.
 */
static void
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
{
	DCPOMATIC_ASSERT (r);
	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
	if (r->actual_duration() > 0) {
		a.push_back (
			ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
			);
	}
}
516
/** @return Assets from referenced DCP content, trimmed to account for the
 *  content's trim settings, for inclusion in the output by reference.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			/* Only DCP content can be referenced */
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* NOTE(review): this abandons the whole scan and returns what we have
			   so far, rather than skipping just this piece of content — confirm
			   that is the intended behaviour.
			*/
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_CLOSED_CAPTION)) {
				BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
587
/** Ask the most-behind decoder (or gap-filler) to emit some more data.
 *  Does nothing while a content change is suspended.
 *  @return true if playback is finished (there is nothing left to emit).
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			/* This piece's decoder has gone past the end of its content */
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What kind of thing will supply the next data */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Black/silent gap-fillers take over if they are behind all content
	   (skipped entirely when video/audio is being ignored).
	*/
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		earliest_content->done = earliest_content->decoder->pass ();
		shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _playback_length;
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Playback is finished: flush anything buffered in the shuffler and the delay line */
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
741
742 /** @return Open subtitles for the frame at the given time, converted to images */
743 optional<PositionImage>
744 Player::open_subtitles_for_frame (DCPTime time) const
745 {
746         list<PositionImage> captions;
747         int const vfr = _film->video_frame_rate();
748
749         BOOST_FOREACH (
750                 PlayerText j,
751                 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
752                 ) {
753
754                 /* Bitmap subtitles */
755                 BOOST_FOREACH (BitmapText i, j.bitmap) {
756                         if (!i.image) {
757                                 continue;
758                         }
759
760                         /* i.image will already have been scaled to fit _video_container_size */
761                         dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
762
763                         captions.push_back (
764                                 PositionImage (
765                                         i.image,
766                                         Position<int> (
767                                                 lrint (_video_container_size.width * i.rectangle.x),
768                                                 lrint (_video_container_size.height * i.rectangle.y)
769                                                 )
770                                         )
771                                 );
772                 }
773
774                 /* String subtitles (rendered to an image) */
775                 if (!j.string.empty ()) {
776                         list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
777                         copy (s.begin(), s.end(), back_inserter (captions));
778                 }
779         }
780
781         if (captions.empty ()) {
782                 return optional<PositionImage> ();
783         }
784
785         return merge (captions);
786 }
787
/** Handle a video frame decoded from a piece of content.
 *  @param wp Piece that the frame came from.
 *  @param video The decoded frame.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away (e.g. content was removed) while this frame was in flight */
		return;
	}

	FrameRateChange frc (_film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		/* This frame rate change discards every other frame; drop the odd-numbered ones */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* The last frame we emitted from this piece, if any; repeating it fills
			   the gap, otherwise we fall back to black frames.
			*/
			LastVideoMap::const_iterator last = _last_video.find (wp);
			if (_film->three_d()) {
				/* 3D: fill eye-by-eye so that left/right frames stay paired up.
				   Work out which eye we must stop just before.
				*/
				Eyes fill_to_eyes = video.eyes;
				if (fill_to_eyes == EYES_BOTH) {
					fill_to_eyes = EYES_LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = EYES_LEFT;
				}
				DCPTime j = fill_from;
				/* Start from the eye after the last one we emitted */
				Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
				if (eyes == EYES_BOTH) {
					eyes = EYES_LEFT;
				}
				/* Emit eyes one at a time until we reach both the fill time and the target eye */
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						emit_video (black_player_video_frame(eyes), j);
					}
					if (eyes == EYES_RIGHT) {
						/* A right eye completes the stereo pair, so advance to the next frame time */
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: emit one filler frame per video frame period */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(EYES_BOTH), j);
					}
				}
			}
		}
	}

	/* Remember this frame so that it can be repeated to fill any later gap from this piece */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (_film, video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content->video->range(),
			piece->content,
			video.frame,
			false
			)
		);

	/* Emit the frame, repeated as required by the frame rate change, but never
	   beyond the end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
889
/** Handle some audio decoded from a piece of content.
 *  @param wp Piece that the audio came from.
 *  @param stream Stream within the piece that the audio came from.
 *  @param content_audio The decoded audio.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away while this audio was in flight */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* This content's audio frame rate after resampling */
	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* This block overlaps the end of the content; keep only the part inside it */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy the buffers before applying gain, presumably so that the decoder's
		   own data is not modified — TODO confirm ownership of content_audio.audio.
		*/
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap to the DCP's channel count using this stream's mapping */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process through the audio processor, if one is in use */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push into the merger, and note how far this stream has got */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
956
957 void
958 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
959 {
960         shared_ptr<Piece> piece = wp.lock ();
961         shared_ptr<const TextContent> text = wc.lock ();
962         if (!piece || !text) {
963                 return;
964         }
965
966         /* Apply content's subtitle offsets */
967         subtitle.sub.rectangle.x += text->x_offset ();
968         subtitle.sub.rectangle.y += text->y_offset ();
969
970         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
971         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
972         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
973
974         /* Apply content's subtitle scale */
975         subtitle.sub.rectangle.width *= text->x_scale ();
976         subtitle.sub.rectangle.height *= text->y_scale ();
977
978         PlayerText ps;
979         shared_ptr<Image> image = subtitle.sub.image;
980
981         /* We will scale the subtitle up to fit _video_container_size */
982         int const width = subtitle.sub.rectangle.width * _video_container_size.width;
983         int const height = subtitle.sub.rectangle.height * _video_container_size.height;
984         if (width == 0 || height == 0) {
985                 return;
986         }
987
988         dcp::Size scaled_size (width, height);
989         ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
990         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
991
992         _active_texts[text->type()].add_from (wc, ps, from);
993 }
994
995 void
996 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
997 {
998         shared_ptr<Piece> piece = wp.lock ();
999         shared_ptr<const TextContent> text = wc.lock ();
1000         if (!piece || !text) {
1001                 return;
1002         }
1003
1004         PlayerText ps;
1005         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
1006
1007         if (from > piece->content->end(_film)) {
1008                 return;
1009         }
1010
1011         BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
1012                 s.set_h_position (s.h_position() + text->x_offset ());
1013                 s.set_v_position (s.v_position() + text->y_offset ());
1014                 float const xs = text->x_scale();
1015                 float const ys = text->y_scale();
1016                 float size = s.size();
1017
1018                 /* Adjust size to express the common part of the scaling;
1019                    e.g. if xs = ys = 0.5 we scale size by 2.
1020                 */
1021                 if (xs > 1e-5 && ys > 1e-5) {
1022                         size *= 1 / min (1 / xs, 1 / ys);
1023                 }
1024                 s.set_size (size);
1025
1026                 /* Then express aspect ratio changes */
1027                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1028                         s.set_aspect_adjust (xs / ys);
1029                 }
1030
1031                 s.set_in (dcp::Time(from.seconds(), 1000));
1032                 ps.string.push_back (StringText (s, text->outline_width()));
1033                 ps.add_fonts (text->fonts ());
1034         }
1035
1036         _active_texts[text->type()].add_from (wc, ps, from);
1037 }
1038
1039 void
1040 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1041 {
1042         shared_ptr<const TextContent> text = wc.lock ();
1043         if (!text) {
1044                 return;
1045         }
1046
1047         if (!_active_texts[text->type()].have(wc)) {
1048                 return;
1049         }
1050
1051         shared_ptr<Piece> piece = wp.lock ();
1052         if (!piece) {
1053                 return;
1054         }
1055
1056         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1057
1058         if (dcp_to > piece->content->end(_film)) {
1059                 return;
1060         }
1061
1062         pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1063
1064         bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1065         if (text->use() && !always && !text->burn()) {
1066                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1067         }
1068 }
1069
/** Seek so that subsequent pass() calls produce data from the given time.
 *  @param time Seek target.
 *  @param accurate true to seek so that the frame at exactly `time' is the next
 *  to be emitted; false to allow a faster seek to somewhere at or before it.
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	if (_shuffler) {
		/* Throw away any frames that the shuffler is holding back */
		_shuffler->clear ();
	}

	/* Drop any video frames waiting in the delay queue (see emit_video) */
	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	/* Forget all pending merged audio and all active texts */
	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();
	}

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* We know exactly where the next video and audio must start */
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	} else {
		/* We don't know exactly where we will end up, so clear these */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget the last frame that each piece emitted */
	_last_video.clear ();
}
1129
1130 void
1131 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1132 {
1133         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1134            player before the video that requires them.
1135         */
1136         _delay.push_back (make_pair (pv, time));
1137
1138         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1139                 _last_video_time = time + one_video_frame();
1140         }
1141         _last_video_eyes = increment_eyes (pv->eyes());
1142
1143         if (_delay.size() < 3) {
1144                 return;
1145         }
1146
1147         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1148         _delay.pop_front();
1149         do_emit_video (to_do.first, to_do.second);
1150 }
1151
1152 void
1153 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1154 {
1155         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1156                 for (int i = 0; i < TEXT_COUNT; ++i) {
1157                         _active_texts[i].clear_before (time);
1158                 }
1159         }
1160
1161         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1162         if (subtitles) {
1163                 pv->set_text (subtitles.get ());
1164         }
1165
1166         Video (pv, time);
1167 }
1168
1169 void
1170 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1171 {
1172         /* Log if the assert below is about to fail */
1173         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1174                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1175         }
1176
1177         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1178         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1179         Audio (data, time, _film->audio_frame_rate());
1180         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1181 }
1182
1183 void
1184 Player::fill_audio (DCPTimePeriod period)
1185 {
1186         if (period.from == period.to) {
1187                 return;
1188         }
1189
1190         DCPOMATIC_ASSERT (period.from < period.to);
1191
1192         DCPTime t = period.from;
1193         while (t < period.to) {
1194                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1195                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1196                 if (samples) {
1197                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1198                         silence->make_silent ();
1199                         emit_audio (silence, t);
1200                 }
1201                 t += block;
1202         }
1203 }
1204
1205 DCPTime
1206 Player::one_video_frame () const
1207 {
1208         return DCPTime::from_frames (1, _film->video_frame_rate ());
1209 }
1210
1211 pair<shared_ptr<AudioBuffers>, DCPTime>
1212 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1213 {
1214         DCPTime const discard_time = discard_to - time;
1215         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1216         Frame remaining_frames = audio->frames() - discard_frames;
1217         if (remaining_frames <= 0) {
1218                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1219         }
1220         shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1221         return make_pair(cut, time + discard_time);
1222 }
1223
1224 void
1225 Player::set_dcp_decode_reduction (optional<int> reduction)
1226 {
1227         Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1228
1229         {
1230                 boost::mutex::scoped_lock lm (_mutex);
1231
1232                 if (reduction == _dcp_decode_reduction) {
1233                         lm.unlock ();
1234                         Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1235                         return;
1236                 }
1237
1238                 _dcp_decode_reduction = reduction;
1239                 setup_pieces_unlocked ();
1240         }
1241
1242         Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1243 }
1244
1245 optional<DCPTime>
1246 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1247 {
1248         boost::mutex::scoped_lock lm (_mutex);
1249
1250         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1251                 if (i->content == content) {
1252                         return content_time_to_dcp (i, t);
1253                 }
1254         }
1255
1256         /* We couldn't find this content; perhaps things are being changed over */
1257         return optional<DCPTime>();
1258 }
1259
1260
1261 shared_ptr<const Playlist>
1262 Player::playlist () const
1263 {
1264         return _playlist ? _playlist : _film->playlist();
1265 }
1266