Fix empty (black) area calculations when video is set to not be used.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_text.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
50 #include "shuffler.h"
51 #include "timer.h"
52 #include <dcp/reel.h>
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <dcp/reel_closed_caption_asset.h>
57 #include <boost/foreach.hpp>
58 #include <stdint.h>
59 #include <algorithm>
60 #include <iostream>
61
62 #include "i18n.h"
63
64 using std::list;
65 using std::cout;
66 using std::min;
67 using std::max;
68 using std::min;
69 using std::vector;
70 using std::pair;
71 using std::map;
72 using std::make_pair;
73 using std::copy;
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79 using namespace dcpomatic;
80
/* Property codes passed with the Player::Change signal to say which aspect of
   the player's state changed.  Numbered from 700 so they cannot collide with
   other property enumerations sent through the same signal.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
87
/** Construct a Player which plays the whole of the given film's own playlist.
 *  @param film Film to take content and settings from.
 */
Player::Player (shared_ptr<const Film> film)
	: _film (film)
	, _suspended (0)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _tolerant (film->tolerant())
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	construct ();
}
103
/** Construct a Player which plays an explicitly-given playlist rather than the
 *  film's own.
 *  @param film Film to take settings from.
 *  @param playlist_ Playlist to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _tolerant (film->tolerant())
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	construct ();
}
120
/** Shared part of the two constructors: connect to film/playlist change
 *  signals, set up the container size and audio processor, build our pieces
 *  and seek to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Act as if the audio processor just changed, so that _audio_processor is set up if the film has one */
	film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}
137
Player::~Player ()
{
	/* _shuffler is a raw pointer that we own; it is (re)allocated in setup_pieces_unlocked() */
	delete _shuffler;
}
142
/** Rebuild the list of pieces from the playlist; thread-safe wrapper around
 *  setup_pieces_unlocked().
 */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
149
150
151 bool
152 have_video (shared_ptr<const Content> content)
153 {
154         return static_cast<bool>(content->video) && content->video->use();
155 }
156
157 bool
158 have_audio (shared_ptr<const Content> content)
159 {
160         return static_cast<bool>(content->audio);
161 }
162
/** Rebuild _pieces from the playlist: create a decoder for each usable piece
 *  of content, wire its signals through to the Player, and recalculate the
 *  periods of black and silence that we must fill.  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so that decoders can be re-used where the content is unchanged */
	list<shared_ptr<Piece> > old_pieces = _pieces;
	_pieces.clear ();

	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {

		if (!i->paths_valid ()) {
			/* Content whose files are missing is skipped entirely */
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Find any decoder we previously made for this content, so it can be re-used */
		shared_ptr<Decoder> old_decoder;
		BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}

		shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		FrameRateChange frc (_film, i);

		if (!decoder) {
			/* Not something that we can decode; e.g. Atmos content */
			continue;
		}

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
				i->set_ignore (true);
			}
		}

		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Route all three text events (bitmap start, plain start, stop) into the Player */
		list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}
	}

	/* Record where each audio stream starts, for use when pulling merged audio in pass() */
	_stream_states.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Periods with no (used) video must be filled with black, and periods with no
	   audio with silence.  Note that have_video respects VideoContent::use(), so
	   content whose video is disabled is treated as a gap to be blacked.
	*/
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();
}
273
274 void
275 Player::playlist_content_change (ChangeType type, int property, bool frequent)
276 {
277         if (type == CHANGE_TYPE_PENDING) {
278                 /* The player content is probably about to change, so we can't carry on
279                    until that has happened and we've rebuilt our pieces.  Stop pass()
280                    and seek() from working until then.
281                 */
282                 ++_suspended;
283         } else if (type == CHANGE_TYPE_DONE) {
284                 /* A change in our content has gone through.  Re-build our pieces. */
285                 setup_pieces ();
286                 --_suspended;
287         } else if (type == CHANGE_TYPE_CANCELLED) {
288                 --_suspended;
289         }
290
291         Change (type, property, frequent);
292 }
293
/** Set the size of the container into which video will be scaled.
 *  Emits Change PENDING, then either CANCELLED (if the size is unchanged) or
 *  DONE once the new black filler image has been built.
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* Unlock before emitting so that handlers may safely call back into us */
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Pre-build the black frame used to fill gaps, at the new size */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
316
317 void
318 Player::playlist_change (ChangeType type)
319 {
320         if (type == CHANGE_TYPE_DONE) {
321                 setup_pieces ();
322         }
323         Change (type, PlayerProperty::PLAYLIST, false);
324 }
325
326 void
327 Player::film_change (ChangeType type, Film::Property p)
328 {
329         /* Here we should notice Film properties that affect our output, and
330            alert listeners that our output now would be different to how it was
331            last time we were run.
332         */
333
334         if (p == Film::CONTAINER) {
335                 Change (type, PlayerProperty::FILM_CONTAINER, false);
336         } else if (p == Film::VIDEO_FRAME_RATE) {
337                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
338                    so we need new pieces here.
339                 */
340                 if (type == CHANGE_TYPE_DONE) {
341                         setup_pieces ();
342                 }
343                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
344         } else if (p == Film::AUDIO_PROCESSOR) {
345                 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
346                         boost::mutex::scoped_lock lm (_mutex);
347                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
348                 }
349         } else if (p == Film::AUDIO_CHANNELS) {
350                 if (type == CHANGE_TYPE_DONE) {
351                         boost::mutex::scoped_lock lm (_mutex);
352                         _audio_merger.clear ();
353                 }
354         }
355 }
356
/** @param eyes Eyes that the frame should be marked for (left/right/both).
 *  @return A completely black frame at the current container size, used to
 *  fill periods where there is no video.
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return shared_ptr<PlayerVideo> (
		new PlayerVideo (
			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
			Crop (),
			optional<double> (),   /* no fade */
			_video_container_size, /* inter size */
			_video_container_size, /* out size */
			eyes,
			PART_WHOLE,
			PresetColourConversion::all().front().conversion,
			VIDEO_RANGE_FULL,
			boost::weak_ptr<Content>(), /* no originating content */
			boost::optional<Frame>(),   /* no video frame */
			false                       /* not an error frame */
		)
	);
}
377
/** Convert a DCP time to a frame index within some piece's video content.
 *  @param piece Piece to use.
 *  @param t DCP time.
 *  @return Frame index at the content's video rate, clamped into the piece's
 *  trimmed range.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
394
395 DCPTime
396 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
397 {
398         /* See comment in dcp_to_content_video */
399         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
400         return d + piece->content->position();
401 }
402
403 Frame
404 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
405 {
406         DCPTime s = t - piece->content->position ();
407         s = min (piece->content->length_after_trim(_film), s);
408         /* See notes in dcp_to_content_video */
409         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
410 }
411
412 DCPTime
413 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
414 {
415         /* See comment in dcp_to_content_video */
416         return DCPTime::from_frames (f, _film->audio_frame_rate())
417                 - DCPTime (piece->content->trim_start(), piece->frc)
418                 + piece->content->position();
419 }
420
421 ContentTime
422 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
423 {
424         DCPTime s = t - piece->content->position ();
425         s = min (piece->content->length_after_trim(_film), s);
426         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
427 }
428
429 DCPTime
430 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
431 {
432         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
433 }
434
435 list<shared_ptr<Font> >
436 Player::get_subtitle_fonts ()
437 {
438         boost::mutex::scoped_lock lm (_mutex);
439
440         list<shared_ptr<Font> > fonts;
441         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
442                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
443                         /* XXX: things may go wrong if there are duplicate font IDs
444                            with different font files.
445                         */
446                         list<shared_ptr<Font> > f = j->fonts ();
447                         copy (f.begin(), f.end(), back_inserter (fonts));
448                 }
449         }
450
451         return fonts;
452 }
453
454 /** Set this player never to produce any video data */
455 void
456 Player::set_ignore_video ()
457 {
458         boost::mutex::scoped_lock lm (_mutex);
459         _ignore_video = true;
460         setup_pieces_unlocked ();
461 }
462
463 void
464 Player::set_ignore_audio ()
465 {
466         boost::mutex::scoped_lock lm (_mutex);
467         _ignore_audio = true;
468         setup_pieces_unlocked ();
469 }
470
471 void
472 Player::set_ignore_text ()
473 {
474         boost::mutex::scoped_lock lm (_mutex);
475         _ignore_text = true;
476         setup_pieces_unlocked ();
477 }
478
479 /** Set the player to always burn open texts into the image regardless of the content settings */
480 void
481 Player::set_always_burn_open_subtitles ()
482 {
483         boost::mutex::scoped_lock lm (_mutex);
484         _always_burn_open_subtitles = true;
485 }
486
487 /** Sets up the player to be faster, possibly at the expense of quality */
488 void
489 Player::set_fast ()
490 {
491         boost::mutex::scoped_lock lm (_mutex);
492         _fast = true;
493         setup_pieces_unlocked ();
494 }
495
496 void
497 Player::set_play_referenced ()
498 {
499         boost::mutex::scoped_lock lm (_mutex);
500         _play_referenced = true;
501         setup_pieces_unlocked ();
502 }
503
/** Apply start/end trims to a reel asset and, if any of it remains, add it to
 *  a list of referenced assets.
 *  @param a List to add to.
 *  @param r Asset to trim (must not be null).
 *  @param reel_trim_start Frames to trim from the start of this asset.
 *  @param reel_trim_end Frames to trim from the end of this asset.
 *  @param from Time at which the trimmed asset will start in the DCP.
 *  @param ffr Film (DCP) video frame rate.
 */
static void
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
{
	DCPOMATIC_ASSERT (r);
	/* Push the entry point forward to apply the start trim */
	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
	/* Shorten the duration to apply both trims.  NOTE(review): this assumes
	   actual_duration() is not itself reduced by the set_entry_point() call
	   above — confirm against the libdcp ReelAsset implementation.
	*/
	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
	if (r->actual_duration() > 0) {
		a.push_back (
			ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
			);
	}
}
516
/** @return Details of the reel assets which will be referenced directly
 *  (rather than re-encoded) from DCP content in the playlist, together with
 *  the period each will occupy in the new DCP.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			/* Only DCP content can be referenced */
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* NOTE(review): a DCP which fails to open aborts the whole scan,
			   returning whatever assets were collected so far.
			*/
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* Work out how much of the start/end trim falls within this reel.
			   See doc/design/trim_reels.svg
			*/
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_CLOSED_CAPTION)) {
				BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
587
/** Run one step of playback: make the decoder (or black/silence filler) which
 *  is furthest behind emit some data, then push out any audio which is now
 *  known to be complete.
 *  @return true if everything has been emitted and playback is finished.
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What we will emit on this pass */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Black filler wins if it is behind all content; skipped entirely when video is ignored */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	/* Likewise silence, skipped when audio is ignored */
	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		earliest_content->done = earliest_content->decoder->pass ();
		shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		/* Emit one black frame and advance the black filler by a frame */
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _playback_length;
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Everything is finished: flush out-of-order 3D frames and any delayed video */
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
741
/** @return Open subtitles for the frame at the given time, converted to images.
 *  @param time Time of the video frame in the DCP.
 *  Returns an unset optional if there is nothing to burn in at this time.
 */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	/* Consider every active open subtitle overlapping this one-frame period */
	BOOST_FOREACH (
		PlayerText j,
		_active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		BOOST_FOREACH (BitmapText i, j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			/* NOTE(review): scaled_size appears to be unused in this function */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint (_video_container_size.width * i.rectangle.x),
						lrint (_video_container_size.height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty ()) {
			list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty ()) {
		return optional<PositionImage> ();
	}

	/* Flatten all the caption images into a single positioned image */
	return merge (captions);
}
787
788 void
789 Player::video (weak_ptr<Piece> wp, ContentVideo video)
790 {
791         shared_ptr<Piece> piece = wp.lock ();
792         if (!piece) {
793                 return;
794         }
795
796         FrameRateChange frc (_film, piece->content);
797         if (frc.skip && (video.frame % 2) == 1) {
798                 return;
799         }
800
801         /* Time of the first frame we will emit */
802         DCPTime const time = content_video_to_dcp (piece, video.frame);
803
804         /* Discard if it's before the content's period or the last accurate seek.  We can't discard
805            if it's after the content's period here as in that case we still need to fill any gap between
806            `now' and the end of the content's period.
807         */
808         if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
809                 return;
810         }
811
812         /* Fill gaps that we discover now that we have some video which needs to be emitted.
813            This is where we need to fill to.
814         */
815         DCPTime fill_to = min (time, piece->content->end(_film));
816
817         if (_last_video_time) {
818                 DCPTime fill_from = max (*_last_video_time, piece->content->position());
819
820                 /* Fill if we have more than half a frame to do */
821                 if ((fill_to - fill_from) > one_video_frame() / 2) {
822                         LastVideoMap::const_iterator last = _last_video.find (wp);
823                         if (_film->three_d()) {
824                                 Eyes fill_to_eyes = video.eyes;
825                                 if (fill_to_eyes == EYES_BOTH) {
826                                         fill_to_eyes = EYES_LEFT;
827                                 }
828                                 if (fill_to == piece->content->end(_film)) {
829                                         /* Don't fill after the end of the content */
830                                         fill_to_eyes = EYES_LEFT;
831                                 }
832                                 DCPTime j = fill_from;
833                                 Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
834                                 if (eyes == EYES_BOTH) {
835                                         eyes = EYES_LEFT;
836                                 }
837                                 while (j < fill_to || eyes != fill_to_eyes) {
838                                         if (last != _last_video.end()) {
839                                                 shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
840                                                 copy->set_eyes (eyes);
841                                                 emit_video (copy, j);
842                                         } else {
843                                                 emit_video (black_player_video_frame(eyes), j);
844                                         }
845                                         if (eyes == EYES_RIGHT) {
846                                                 j += one_video_frame();
847                                         }
848                                         eyes = increment_eyes (eyes);
849                                 }
850                         } else {
851                                 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
852                                         if (last != _last_video.end()) {
853                                                 emit_video (last->second, j);
854                                         } else {
855                                                 emit_video (black_player_video_frame(EYES_BOTH), j);
856                                         }
857                                 }
858                         }
859                 }
860         }
861
862         _last_video[wp].reset (
863                 new PlayerVideo (
864                         video.image,
865                         piece->content->video->crop (),
866                         piece->content->video->fade (_film, video.frame),
867                         scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
868                         _video_container_size,
869                         video.eyes,
870                         video.part,
871                         piece->content->video->colour_conversion(),
872                         piece->content->video->range(),
873                         piece->content,
874                         video.frame,
875                         false
876                         )
877                 );
878
879         DCPTime t = time;
880         for (int i = 0; i < frc.repeat; ++i) {
881                 if (t < piece->content->end(_film)) {
882                         emit_video (_last_video[wp], t);
883                 }
884                 t += one_video_frame ();
885         }
886 }
887
/** Handle some audio that has arrived from a decoder for some piece of content.
 *  Trims it to the content's period, applies gain, remaps it to the film's
 *  channel layout, runs any audio processor and pushes the result into
 *  _audio_merger.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Frame rate of this audio after resampling for the film */
	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		/* Chop the front off so we start at the content's position */
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* The block overlaps the end of the content; keep only the part inside */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy the buffers before applying gain so we don't modify the decoder's data */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	/* Note where this stream's pushed audio now ends, for the pass() bookkeeping */
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
954
955 void
956 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
957 {
958         shared_ptr<Piece> piece = wp.lock ();
959         shared_ptr<const TextContent> text = wc.lock ();
960         if (!piece || !text) {
961                 return;
962         }
963
964         /* Apply content's subtitle offsets */
965         subtitle.sub.rectangle.x += text->x_offset ();
966         subtitle.sub.rectangle.y += text->y_offset ();
967
968         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
969         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
970         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
971
972         /* Apply content's subtitle scale */
973         subtitle.sub.rectangle.width *= text->x_scale ();
974         subtitle.sub.rectangle.height *= text->y_scale ();
975
976         PlayerText ps;
977         shared_ptr<Image> image = subtitle.sub.image;
978
979         /* We will scale the subtitle up to fit _video_container_size */
980         int const width = subtitle.sub.rectangle.width * _video_container_size.width;
981         int const height = subtitle.sub.rectangle.height * _video_container_size.height;
982         if (width == 0 || height == 0) {
983                 return;
984         }
985
986         dcp::Size scaled_size (width, height);
987         ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
988         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
989
990         _active_texts[text->type()].add_from (wc, ps, from);
991 }
992
/** Handle the start of a string (text-based) subtitle/caption from a decoder:
 *  apply the content's offsets/scales to each SubtitleString and record the
 *  result as active.
 */
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
		/* Piece or content has gone away; nothing to do */
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	if (from > piece->content->end(_film)) {
		/* Starts after the end of the content; ignore */
		return;
	}

	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		/* Apply the content's configured position offsets */
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we scale size by 2.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* Time is in milliseconds (editable rate 1000) */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[text->type()].add_from (wc, ps, from);
}
1036
1037 void
1038 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1039 {
1040         shared_ptr<const TextContent> text = wc.lock ();
1041         if (!text) {
1042                 return;
1043         }
1044
1045         if (!_active_texts[text->type()].have(wc)) {
1046                 return;
1047         }
1048
1049         shared_ptr<Piece> piece = wp.lock ();
1050         if (!piece) {
1051                 return;
1052         }
1053
1054         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1055
1056         if (dcp_to > piece->content->end(_film)) {
1057                 return;
1058         }
1059
1060         pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1061
1062         bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1063         if (text->use() && !always && !text->burn()) {
1064                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1065         }
1066 }
1067
/** Seek the player to a new position.
 *  @param time Position to seek to.
 *  @param accurate true to seek exactly to `time'; false to seek approximately
 *  (e.g. to the nearest keyframe), which is faster.
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	/* Throw away any buffered state from before the seek */
	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();
	}

	/* Seek each piece's decoder as appropriate for where `time' falls
	   relative to that piece's period.
	*/
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	/* For an accurate seek we know exactly where the next video/audio should
	   come from; for an inaccurate one we don't, so leave the times unset.
	*/
	if (accurate) {
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	} else {
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget remembered frames; they are no longer suitable for gap-filling */
	_last_video.clear ();
}
1127
1128 void
1129 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1130 {
1131         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1132            player before the video that requires them.
1133         */
1134         _delay.push_back (make_pair (pv, time));
1135
1136         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1137                 _last_video_time = time + one_video_frame();
1138         }
1139         _last_video_eyes = increment_eyes (pv->eyes());
1140
1141         if (_delay.size() < 3) {
1142                 return;
1143         }
1144
1145         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1146         _delay.pop_front();
1147         do_emit_video (to_do.first, to_do.second);
1148 }
1149
1150 void
1151 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1152 {
1153         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1154                 for (int i = 0; i < TEXT_COUNT; ++i) {
1155                         _active_texts[i].clear_before (time);
1156                 }
1157         }
1158
1159         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1160         if (subtitles) {
1161                 pv->set_text (subtitles.get ());
1162         }
1163
1164         Video (pv, time);
1165 }
1166
1167 void
1168 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1169 {
1170         /* Log if the assert below is about to fail */
1171         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1172                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1173         }
1174
1175         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1176         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1177         Audio (data, time, _film->audio_frame_rate());
1178         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1179 }
1180
1181 void
1182 Player::fill_audio (DCPTimePeriod period)
1183 {
1184         if (period.from == period.to) {
1185                 return;
1186         }
1187
1188         DCPOMATIC_ASSERT (period.from < period.to);
1189
1190         DCPTime t = period.from;
1191         while (t < period.to) {
1192                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1193                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1194                 if (samples) {
1195                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1196                         silence->make_silent ();
1197                         emit_audio (silence, t);
1198                 }
1199                 t += block;
1200         }
1201 }
1202
/** @return The duration of one video frame at the film's video frame rate */
DCPTime
Player::one_video_frame () const
{
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}
1208
1209 pair<shared_ptr<AudioBuffers>, DCPTime>
1210 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1211 {
1212         DCPTime const discard_time = discard_to - time;
1213         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1214         Frame remaining_frames = audio->frames() - discard_frames;
1215         if (remaining_frames <= 0) {
1216                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1217         }
1218         shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1219         return make_pair(cut, time + discard_time);
1220 }
1221
/** Set the decode-resolution reduction to use for DCP content.
 *  @param reduction New reduction, or none for full resolution
 *  (presumably the number of times the resolution is halved — confirm
 *  against DCPDecoder).
 */
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	/* Announce that a change to this property may be coming */
	Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change; unlock before emitting so that observers can
			   safely call back into the Player.
			*/
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		/* Rebuild the pieces so decoders pick up the new reduction */
		setup_pieces_unlocked ();
	}

	/* Lock released by scope exit before we announce completion */
	Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1242
1243 optional<DCPTime>
1244 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1245 {
1246         boost::mutex::scoped_lock lm (_mutex);
1247
1248         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1249                 if (i->content == content) {
1250                         return content_time_to_dcp (i, t);
1251                 }
1252         }
1253
1254         /* We couldn't find this content; perhaps things are being changed over */
1255         return optional<DCPTime>();
1256 }
1257
1258
1259 shared_ptr<const Playlist>
1260 Player::playlist () const
1261 {
1262         return _playlist ? _playlist : _film->playlist();
1263 }
1264