Make player more tolerant of some DCP errors.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2019 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_text.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
50 #include "shuffler.h"
51 #include "timer.h"
52 #include <dcp/reel.h>
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <dcp/reel_closed_caption_asset.h>
57 #include <boost/foreach.hpp>
58 #include <stdint.h>
59 #include <algorithm>
60 #include <iostream>
61
62 #include "i18n.h"
63
64 using std::list;
65 using std::cout;
66 using std::min;
67 using std::max;
68 using std::min;
69 using std::vector;
70 using std::pair;
71 using std::map;
72 using std::make_pair;
73 using std::copy;
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79 using namespace dcpomatic;
80
/* Property identifiers carried by the Player's Change signal so that
   observers can tell which aspect of the player's output has changed.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
86
/** Construct a Player to play the given playlist of the given film.
 *  @param film Film whose settings (frame rate, size, audio processor etc.) we use.
 *  @param playlist Playlist to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _suspended (false)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	/* Whether decoders should be tolerant of DCP errors is taken from the film */
	, _tolerant (film->tolerant())
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Set up the audio processor (if the film has one) as if its setting had just changed */
	film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);

	/* Build our pieces and do an accurate seek to the start */
	setup_pieces ();
	seek (DCPTime (), true);
}
114
Player::~Player ()
{
	/* _shuffler is an owning raw pointer (allocated in setup_pieces_unlocked) */
	delete _shuffler;
}
119
120 void
121 Player::setup_pieces ()
122 {
123         boost::mutex::scoped_lock lm (_mutex);
124         setup_pieces_unlocked ();
125 }
126
127 bool
128 have_video (shared_ptr<Piece> piece)
129 {
130         return piece->decoder && piece->decoder->video;
131 }
132
133 bool
134 have_audio (shared_ptr<Piece> piece)
135 {
136         return piece->decoder && piece->decoder->audio;
137 }
138
139 void
140 Player::setup_pieces_unlocked ()
141 {
142         list<shared_ptr<Piece> > old_pieces = _pieces;
143         _pieces.clear ();
144
145         delete _shuffler;
146         _shuffler = new Shuffler();
147         _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
148
149         cout << "SPU " << _playlist->content().size() << ".\n";
150
151         BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
152
153                 if (!i->paths_valid ()) {
154                         cout << "not valid.\n";
155                         continue;
156                 }
157
158                 if (_ignore_video && _ignore_audio && i->text.empty()) {
159                         cout << "text only.\n";
160                         /* We're only interested in text and this content has none */
161                         continue;
162                 }
163
164                 shared_ptr<Decoder> old_decoder;
165                 BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
166                         if (j->content == i) {
167                                 old_decoder = j->decoder;
168                                 break;
169                         }
170                 }
171
172                 cout << " DF " << _tolerant << "\n";
173                 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
174                 FrameRateChange frc (_film, i);
175
176                 if (!decoder) {
177                         /* Not something that we can decode; e.g. Atmos content */
178                         continue;
179                 }
180
181                 if (decoder->video && _ignore_video) {
182                         decoder->video->set_ignore (true);
183                 }
184
185                 if (decoder->audio && _ignore_audio) {
186                         decoder->audio->set_ignore (true);
187                 }
188
189                 if (_ignore_text) {
190                         BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
191                                 i->set_ignore (true);
192                         }
193                 }
194
195                 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
196                 if (dcp) {
197                         dcp->set_decode_referenced (_play_referenced);
198                         if (_play_referenced) {
199                                 dcp->set_forced_reduction (_dcp_decode_reduction);
200                         }
201                 }
202
203                 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
204                 _pieces.push_back (piece);
205
206                 if (decoder->video) {
207                         if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
208                                 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
209                                 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
210                         } else {
211                                 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
212                         }
213                 }
214
215                 if (decoder->audio) {
216                         decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
217                 }
218
219                 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
220
221                 while (j != decoder->text.end()) {
222                         (*j)->BitmapStart.connect (
223                                 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
224                                 );
225                         (*j)->PlainStart.connect (
226                                 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
227                                 );
228                         (*j)->Stop.connect (
229                                 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
230                                 );
231
232                         ++j;
233                 }
234         }
235
236         _stream_states.clear ();
237         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
238                 if (i->content->audio) {
239                         BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
240                                 _stream_states[j] = StreamState (i, i->content->position ());
241                         }
242                 }
243         }
244
245         _black = Empty (_film, _pieces, bind(&have_video, _1));
246         _silent = Empty (_film, _pieces, bind(&have_audio, _1));
247
248         _last_video_time = DCPTime ();
249         _last_video_eyes = EYES_BOTH;
250         _last_audio_time = DCPTime ();
251 }
252
/** Handler for changes to a piece of content in the playlist.
 *  @param type Stage of the change (pending / done / cancelled).
 *  @param property Property that changed.
 *  @param frequent true if this change is happening frequently (e.g. during a drag).
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (type == CHANGE_TYPE_PENDING) {
		boost::mutex::scoped_lock lm (_mutex);
		/* The player content is probably about to change, so we can't carry on
		   until that has happened and we've rebuilt our pieces.  Stop pass()
		   and seek() from working until then.
		*/
		_suspended = true;
	} else if (type == CHANGE_TYPE_DONE) {
		/* A change in our content has gone through.  Re-build our pieces. */
		/* setup_pieces() takes _mutex itself, so we must not hold it here. */
		setup_pieces ();
		/* NOTE(review): _suspended is cleared here without holding _mutex,
		   unlike the other two branches -- confirm this is intentional. */
		_suspended = false;
	} else if (type == CHANGE_TYPE_CANCELLED) {
		boost::mutex::scoped_lock lm (_mutex);
		_suspended = false;
	}

	/* Pass the change on to our own observers (the butler hears this first) */
	Change (type, property, frequent);
}
274
/** Set the size of the "container" into which video will be scaled,
 *  re-making the black filler image to match.  Emits pending/done (or
 *  cancelled, if the size is unchanged) Change signals.
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No change: unlock before emitting, as handlers may call back into us */
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Pre-render a black frame at the new size for use as filler */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	/* Emit DONE outside the lock */
	Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
297
298 void
299 Player::playlist_change (ChangeType type)
300 {
301         if (type == CHANGE_TYPE_DONE) {
302                 setup_pieces ();
303         }
304         Change (type, PlayerProperty::PLAYLIST, false);
305 }
306
307 void
308 Player::film_change (ChangeType type, Film::Property p)
309 {
310         /* Here we should notice Film properties that affect our output, and
311            alert listeners that our output now would be different to how it was
312            last time we were run.
313         */
314
315         if (p == Film::CONTAINER) {
316                 Change (type, PlayerProperty::FILM_CONTAINER, false);
317         } else if (p == Film::VIDEO_FRAME_RATE) {
318                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
319                    so we need new pieces here.
320                 */
321                 if (type == CHANGE_TYPE_DONE) {
322                         setup_pieces ();
323                 }
324                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
325         } else if (p == Film::AUDIO_PROCESSOR) {
326                 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
327                         boost::mutex::scoped_lock lm (_mutex);
328                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
329                 }
330         } else if (p == Film::AUDIO_CHANNELS) {
331                 if (type == CHANGE_TYPE_DONE) {
332                         boost::mutex::scoped_lock lm (_mutex);
333                         _audio_merger.clear ();
334                 }
335         }
336 }
337
338 shared_ptr<PlayerVideo>
339 Player::black_player_video_frame (Eyes eyes) const
340 {
341         return shared_ptr<PlayerVideo> (
342                 new PlayerVideo (
343                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
344                         Crop (),
345                         optional<double> (),
346                         _video_container_size,
347                         _video_container_size,
348                         eyes,
349                         PART_WHOLE,
350                         PresetColourConversion::all().front().conversion,
351                         VIDEO_RANGE_FULL,
352                         boost::weak_ptr<Content>(),
353                         boost::optional<Frame>()
354                 )
355         );
356 }
357
/** Convert a DCP time to a frame index within a piece's video content.
 *  @param piece Piece to look at.
 *  @param t DCP time.
 *  @return Index of the content video frame active at t.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	/* Time into the content, clamped to its trimmed length, with start trim added back */
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
374
/** Inverse of dcp_to_content_video: convert a content video frame index to a DCP time.
 *  @param piece Piece containing the frame.
 *  @param f Content video frame index.
 *  @return DCP time at which that frame appears.
 */
DCPTime
Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
	/* See comment in dcp_to_content_video */
	DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
	return d + piece->content->position();
}
382
/** Convert a DCP time to a frame count at the film's audio sample rate,
 *  measured from the start of the piece's (untrimmed) content.
 */
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
}
391
/** Inverse of dcp_to_resampled_audio: convert a frame count at the film's
 *  audio rate (from the start of the piece's untrimmed content) to a DCP time.
 */
DCPTime
Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
	/* See comment in dcp_to_content_video */
	return DCPTime::from_frames (f, _film->audio_frame_rate())
		- DCPTime (piece->content->trim_start(), piece->frc)
		+ piece->content->position();
}
400
/** Convert a DCP time to a ContentTime within a piece, clamped to the piece's
 *  trimmed length and never less than zero.
 */
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
408
/** Inverse of dcp_to_content_time: convert a ContentTime within a piece to the
 *  corresponding DCP time, clamped to zero.
 */
DCPTime
Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
{
	return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
}
414
415 list<shared_ptr<Font> >
416 Player::get_subtitle_fonts ()
417 {
418         boost::mutex::scoped_lock lm (_mutex);
419
420         list<shared_ptr<Font> > fonts;
421         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
422                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
423                         /* XXX: things may go wrong if there are duplicate font IDs
424                            with different font files.
425                         */
426                         list<shared_ptr<Font> > f = j->fonts ();
427                         copy (f.begin(), f.end(), back_inserter (fonts));
428                 }
429         }
430
431         return fonts;
432 }
433
434 /** Set this player never to produce any video data */
435 void
436 Player::set_ignore_video ()
437 {
438         boost::mutex::scoped_lock lm (_mutex);
439         _ignore_video = true;
440         setup_pieces_unlocked ();
441 }
442
443 void
444 Player::set_ignore_audio ()
445 {
446         boost::mutex::scoped_lock lm (_mutex);
447         _ignore_audio = true;
448         setup_pieces_unlocked ();
449 }
450
451 void
452 Player::set_ignore_text ()
453 {
454         boost::mutex::scoped_lock lm (_mutex);
455         _ignore_text = true;
456         setup_pieces_unlocked ();
457 }
458
459 /** Set the player to always burn open texts into the image regardless of the content settings */
460 void
461 Player::set_always_burn_open_subtitles ()
462 {
463         boost::mutex::scoped_lock lm (_mutex);
464         _always_burn_open_subtitles = true;
465 }
466
467 /** Sets up the player to be faster, possibly at the expense of quality */
468 void
469 Player::set_fast ()
470 {
471         boost::mutex::scoped_lock lm (_mutex);
472         _fast = true;
473         setup_pieces_unlocked ();
474 }
475
476 void
477 Player::set_play_referenced ()
478 {
479         boost::mutex::scoped_lock lm (_mutex);
480         _play_referenced = true;
481         setup_pieces_unlocked ();
482 }
483
/** Adjust a reel asset's entry point and duration for the given trims, then
 *  add it to a list if any duration remains.  Note that the asset is mutated
 *  even when nothing is added.
 *  @param a List to add to.
 *  @param r Asset to consider; must not be null.
 *  @param reel_trim_start Trim at the start of this reel, in frames.
 *  @param reel_trim_end Trim at the end of this reel, in frames.
 *  @param from DCP time at which this asset starts.
 *  @param ffr Film (DCP) video frame rate.
 */
static void
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
{
	DCPOMATIC_ASSERT (r);
	/* Apply the start trim to the entry point (treating a missing entry point as 0) ... */
	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
	/* ... and both trims to the duration */
	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
	if (r->actual_duration() > 0) {
		a.push_back (
			ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
			);
	}
}
496
/** @return Assets from the playlist's DCP content which are marked to be
 *  referenced (rather than re-wrapped) in the DCP we are making, with the
 *  playlist's trims applied reel-by-reel.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
		/* Only DCP content can be referenced */
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* Could not read this DCP: give up with what we have so far.
			   NOTE(review): this abandons the rest of the playlist rather than
			   skipping just this piece of content -- confirm that is intended.
			*/
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		/* Trims expressed in frames at the content's rate */
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* Work out how much of the overall trim falls inside this reel.
			   See doc/design/trim_reels.svg
			*/
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_CLOSED_CAPTION)) {
				BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
567
/** Try to make some progress by asking whichever of our pieces (or black /
 *  silent filler) is farthest behind to emit some data.
 *  @return true if there is nothing left to do (we are done), otherwise false.
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		return false;
	}

	if (_playlist->length(_film) == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			/* This piece's decoder is past the end of its content */
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What should emit next: a piece of content, black filler or silence */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		earliest_content->done = earliest_content->decoder->pass ();
		shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		/* Emit one black frame and move the black filler on by one frame */
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			*/
			DCPOMATIC_ASSERT (labs(period.from.get() - _last_audio_time->get()) < 2);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _film->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* At the end: flush any 3D frames still in the shuffler and emit any delayed video */
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
713
/** @return Open subtitles for the frame at the given time, converted to images,
 *  or an empty optional if there are none.
 *  @param time Time of the video frame to find subtitles for.
 */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	/* Look for subtitles active during the one-frame period starting at `time' */
	BOOST_FOREACH (
		PlayerText j,
		_active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		BOOST_FOREACH (BitmapText i, j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

			/* Convert the proportional rectangle position to pixels in the container */
			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint (_video_container_size.width * i.rectangle.x),
						lrint (_video_container_size.height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty ()) {
			list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty ()) {
		return optional<PositionImage> ();
	}

	/* Merge all the images for this frame into one */
	return merge (captions);
}
759
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	/* Handle some video (`video') arriving from the decoder of the piece `wp'.
	   Out-of-range frames are discarded; any gap between the last video that we
	   emitted and this frame is filled (with a repeat of the previous frame, or
	   with black); then the frame itself is emitted, repeated if the frame rate
	   change requires it.
	*/
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away while this video was in flight */
		return;
	}

	FrameRateChange frc (_film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		/* This frame rate change drops alternate frames, so discard odd ones */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* The last frame we emitted for this piece, if any; used as filler */
			LastVideoMap::const_iterator last = _last_video.find (wp);
			if (_film->three_d()) {
				/* In 3D we fill eye-by-eye, starting at the eye which follows
				   the last one we emitted and stopping at the eye before the
				   one this new frame will supply.
				*/
				Eyes fill_to_eyes = video.eyes;
				if (fill_to_eyes == EYES_BOTH) {
					/* An EYES_BOTH frame supplies both eyes, so stop at LEFT */
					fill_to_eyes = EYES_LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = EYES_LEFT;
				}
				DCPTime j = fill_from;
				Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
				if (eyes == EYES_BOTH) {
					eyes = EYES_LEFT;
				}
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						/* Repeat the last frame with the eye we need to fill */
						shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						/* Nothing to repeat, so fill with black */
						emit_video (black_player_video_frame(eyes), j);
					}
					if (eyes == EYES_RIGHT) {
						/* Time only advances once both eyes have been emitted */
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: fill frame-by-frame, repeating the last frame or using black */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(EYES_BOTH), j);
					}
				}
			}
		}
	}

	/* Remember this frame so that it can be repeated if we need to fill a gap later */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (_film, video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content->video->range(),
			piece->content,
			video.frame
			)
		);

	/* Emit the frame, repeated as required by the frame rate change, but never
	   past the end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
860
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	/* Handle some audio (`content_audio') arriving from `stream' of the piece `wp'.
	   The audio is trimmed to the content's period, then has gain, channel remapping
	   and any audio processor applied before being pushed to the merger.
	*/
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away while this audio was in flight */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Frame rate of this content's audio after resampling for the DCP */
	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* The block straddles the end of the content; keep only the part before it */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Work on a copy so that we don't modify the decoder's buffers */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap to the DCP's channel layout */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	/* The stream state is presumably created during setup_pieces; the assert guards that */
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
929
void
Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
{
	/* Handle the start of a bitmap subtitle from the text content `wc' of piece `wp'.
	   The content's offset and scale settings are applied, the image is scaled up to
	   _video_container_size and the result is added to the active texts.
	*/
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
		return;
	}

	/* Apply content's subtitle offsets */
	subtitle.sub.rectangle.x += text->x_offset ();
	subtitle.sub.rectangle.y += text->y_offset ();

	/* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
	subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
	subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);

	/* Apply content's subtitle scale */
	subtitle.sub.rectangle.width *= text->x_scale ();
	subtitle.sub.rectangle.height *= text->y_scale ();

	PlayerText ps;
	shared_ptr<Image> image = subtitle.sub.image;
	/* We will scale the subtitle up to fit _video_container_size */
	dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
	ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
	/* Start time of this subtitle in the DCP */
	DCPTime from (content_time_to_dcp (piece, subtitle.from()));

	_active_texts[text->type()].add_from (wc, ps, from);
}
960
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	/* Handle the start of some string (plain-text) subtitles from the text content
	   `wc' of piece `wp'.  The content's position and scale settings are applied to
	   each string and the result is added to the active texts.
	*/
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	if (from > piece->content->end(_film)) {
		/* Subtitle starts after the end of the content's period; ignore it */
		return;
	}

	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		/* Apply content's subtitle offsets */
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Fold the common part of the x and y scales into the point size
		   (the residual aspect difference is expressed separately below);
		   the factor is 1 / min (1 / xs, 1 / ys) == max (xs, ys), so e.g.
		   if xs = ys = 0.5 the size is halved.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* `in' time, using a time-code rate of 1000 (i.e. millisecond resolution) */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[text->type()].add_from (wc, ps, from);
}
1004
void
Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
{
	/* Handle the end, at content time `to', of the currently-active subtitle from
	   the text content `wc' of piece `wp'.  If the subtitle is in use and not being
	   burnt into the image it is emitted via the Text signal.
	*/
	shared_ptr<const TextContent> text = wc.lock ();
	if (!text) {
		return;
	}

	if (!_active_texts[text->type()].have(wc)) {
		/* A stop without a matching start; nothing to do */
		return;
	}

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	DCPTime const dcp_to = content_time_to_dcp (piece, to);

	if (dcp_to > piece->content->end(_film)) {
		/* Subtitle ends after the content's period; ignore the stop */
		return;
	}

	pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);

	/* Open subtitles may be forcibly burnt in, in which case they are not emitted as text */
	bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
	if (text->use() && !always && !text->burn()) {
		Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
	}
}
1035
void
Player::seek (DCPTime time, bool accurate)
{
	/* Seek the player to `time'.  If `accurate' is true the first material emitted
	   after this call will be exactly at `time'; otherwise it may be a little
	   before.  All buffered state (delayed video, merged audio, active texts) is
	   discarded.
	*/
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	if (_shuffler) {
		_shuffler->clear ();
	}

	/* Drop video frames which were being held back for subtitle timing */
	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();
	}

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* We know exactly what the next emitted material should be */
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	} else {
		/* We can't be sure what will arrive next, so don't constrain it */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget remembered frames so that stale video is not repeated after the seek */
	_last_video.clear ();
}
1095
1096 void
1097 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1098 {
1099         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1100            player before the video that requires them.
1101         */
1102         _delay.push_back (make_pair (pv, time));
1103
1104         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1105                 _last_video_time = time + one_video_frame();
1106         }
1107         _last_video_eyes = increment_eyes (pv->eyes());
1108
1109         if (_delay.size() < 3) {
1110                 return;
1111         }
1112
1113         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1114         _delay.pop_front();
1115         do_emit_video (to_do.first, to_do.second);
1116 }
1117
1118 void
1119 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1120 {
1121         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1122                 for (int i = 0; i < TEXT_COUNT; ++i) {
1123                         _active_texts[i].clear_before (time);
1124                 }
1125         }
1126
1127         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1128         if (subtitles) {
1129                 pv->set_text (subtitles.get ());
1130         }
1131
1132         Video (pv, time);
1133 }
1134
1135 void
1136 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1137 {
1138         /* Log if the assert below is about to fail */
1139         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1140                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1141         }
1142
1143         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1144         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1145         Audio (data, time, _film->audio_frame_rate());
1146         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1147 }
1148
1149 void
1150 Player::fill_audio (DCPTimePeriod period)
1151 {
1152         if (period.from == period.to) {
1153                 return;
1154         }
1155
1156         DCPOMATIC_ASSERT (period.from < period.to);
1157
1158         DCPTime t = period.from;
1159         while (t < period.to) {
1160                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1161                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1162                 if (samples) {
1163                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1164                         silence->make_silent ();
1165                         emit_audio (silence, t);
1166                 }
1167                 t += block;
1168         }
1169 }
1170
1171 DCPTime
1172 Player::one_video_frame () const
1173 {
1174         return DCPTime::from_frames (1, _film->video_frame_rate ());
1175 }
1176
1177 pair<shared_ptr<AudioBuffers>, DCPTime>
1178 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1179 {
1180         DCPTime const discard_time = discard_to - time;
1181         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1182         Frame remaining_frames = audio->frames() - discard_frames;
1183         if (remaining_frames <= 0) {
1184                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1185         }
1186         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1187         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1188         return make_pair(cut, time + discard_time);
1189 }
1190
1191 void
1192 Player::set_dcp_decode_reduction (optional<int> reduction)
1193 {
1194         Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1195
1196         {
1197                 boost::mutex::scoped_lock lm (_mutex);
1198
1199                 if (reduction == _dcp_decode_reduction) {
1200                         lm.unlock ();
1201                         Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1202                         return;
1203                 }
1204
1205                 _dcp_decode_reduction = reduction;
1206                 setup_pieces_unlocked ();
1207         }
1208
1209         Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1210 }
1211
1212 optional<DCPTime>
1213 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1214 {
1215         boost::mutex::scoped_lock lm (_mutex);
1216
1217         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1218                 if (i->content == content) {
1219                         return content_time_to_dcp (i, t);
1220                 }
1221         }
1222
1223         /* We couldn't find this content; perhaps things are being changed over */
1224         return optional<DCPTime>();
1225 }