Add accessor for _playlist.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_text.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
50 #include "shuffler.h"
51 #include "timer.h"
52 #include <dcp/reel.h>
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <dcp/reel_closed_caption_asset.h>
57 #include <boost/foreach.hpp>
58 #include <stdint.h>
59 #include <algorithm>
60 #include <iostream>
61
62 #include "i18n.h"
63
64 using std::list;
65 using std::cout;
66 using std::min;
67 using std::max;
68 using std::min;
69 using std::vector;
70 using std::pair;
71 using std::map;
72 using std::make_pair;
73 using std::copy;
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79 using namespace dcpomatic;
80
/* Property codes passed with the Player's Change signal so that observers
   can tell which aspect of the player's output has changed.  Values are
   arbitrary but must be distinct.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
87
/** Construct a Player for a film/playlist pair.
 *  @param film Film whose settings (frame rate, audio rate, container etc.) we use.
 *  @param playlist_ Playlist to play; may differ from the film's own playlist.
 *  @param playback_length Length of the playback period.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_, DCPTime playback_length)
        : _film (film)
        , _playlist (playlist_)
        , _suspended (0)
        , _ignore_video (false)
        , _ignore_audio (false)
        , _ignore_text (false)
        , _always_burn_open_subtitles (false)
        , _fast (false)
        , _tolerant (film->tolerant())
        , _play_referenced (false)
        , _audio_merger (_film->audio_frame_rate())
        , _shuffler (0)
        , _playback_length (playback_length)
{
        _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
        /* The butler must hear about this first, so since we are proxying this through to the butler we must
           be first.
        */
        _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
        _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
        set_video_container_size (_film->frame_size ());

        /* Pick up the film's audio processor, if any */
        film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);

        /* Build Pieces for the playlist's content and position everything at the start */
        setup_pieces ();
        seek (DCPTime (), true);
}
116
Player::~Player ()
{
        /* _shuffler is a raw owning pointer (re-created by setup_pieces_unlocked) */
        delete _shuffler;
}
121
/** Take _mutex and rebuild our Pieces from the playlist content */
void
Player::setup_pieces ()
{
        boost::mutex::scoped_lock lm (_mutex);
        setup_pieces_unlocked ();
}
128
129
130 void
131 Player::set_playback_length (DCPTime len)
132 {
133         Change (CHANGE_TYPE_PENDING, PlayerProperty::PLAYBACK_LENGTH, false);
134         _playback_length = len;
135         Change (CHANGE_TYPE_DONE, PlayerProperty::PLAYBACK_LENGTH, false);
136         setup_pieces ();
137 }
138
139 bool
140 have_video (shared_ptr<const Content> content)
141 {
142         return static_cast<bool>(content->video);
143 }
144
145 bool
146 have_audio (shared_ptr<const Content> content)
147 {
148         return static_cast<bool>(content->audio);
149 }
150
/** Rebuild _pieces, the Shuffler, the per-stream state and the black/silent
 *  "Empty" trackers from the current playlist content.  Caller must hold _mutex.
 *  Existing decoders are re-used where the content is unchanged.
 */
void
Player::setup_pieces_unlocked ()
{
        /* Keep the old pieces around so we can re-use their decoders below */
        list<shared_ptr<Piece> > old_pieces = _pieces;
        _pieces.clear ();

        delete _shuffler;
        _shuffler = new Shuffler();
        _shuffler->Video.connect(bind(&Player::video, this, _1, _2));

        BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {

                if (!i->paths_valid ()) {
                        /* Content files are missing; skip it */
                        continue;
                }

                if (_ignore_video && _ignore_audio && i->text.empty()) {
                        /* We're only interested in text and this content has none */
                        continue;
                }

                /* Re-use the decoder from the old pieces if this content already had one */
                shared_ptr<Decoder> old_decoder;
                BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
                        if (j->content == i) {
                                old_decoder = j->decoder;
                                break;
                        }
                }

                shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
                FrameRateChange frc (_film, i);

                if (!decoder) {
                        /* Not something that we can decode; e.g. Atmos content */
                        continue;
                }

                if (decoder->video && _ignore_video) {
                        decoder->video->set_ignore (true);
                }

                if (decoder->audio && _ignore_audio) {
                        decoder->audio->set_ignore (true);
                }

                if (_ignore_text) {
                        BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
                                i->set_ignore (true);
                        }
                }

                shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
                if (dcp) {
                        dcp->set_decode_referenced (_play_referenced);
                        if (_play_referenced) {
                                dcp->set_forced_reduction (_dcp_decode_reduction);
                        }
                }

                shared_ptr<Piece> piece (new Piece (i, decoder, frc));
                _pieces.push_back (piece);

                if (decoder->video) {
                        if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
                                /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
                                decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
                        } else {
                                decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
                        }
                }

                if (decoder->audio) {
                        decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
                }

                /* Hook up each text decoder's start/stop signals */
                list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

                while (j != decoder->text.end()) {
                        (*j)->BitmapStart.connect (
                                bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
                                );
                        (*j)->PlainStart.connect (
                                bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
                                );
                        (*j)->Stop.connect (
                                bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
                                );

                        ++j;
                }
        }

        /* Record, per audio stream, which piece it belongs to and where it starts */
        _stream_states.clear ();
        BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
                if (i->content->audio) {
                        BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
                                _stream_states[j] = StreamState (i, i->content->position ());
                        }
                }
        }

        /* Trackers for the periods with no video (to be filled with black) and
           no audio (to be filled with silence).
        */
        _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
        _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

        /* Reset emission state to the start */
        _last_video_time = DCPTime ();
        _last_video_eyes = EYES_BOTH;
        _last_audio_time = DCPTime ();
}
259
/** Handle a ContentChange signal from the playlist.
 *  PENDING increments _suspended (blocking pass() and seek()); DONE rebuilds
 *  the pieces and decrements it; CANCELLED just decrements it.  The change is
 *  then proxied on to our own observers.
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
        if (type == CHANGE_TYPE_PENDING) {
                /* The player content is probably about to change, so we can't carry on
                   until that has happened and we've rebuilt our pieces.  Stop pass()
                   and seek() from working until then.
                */
                ++_suspended;
        } else if (type == CHANGE_TYPE_DONE) {
                /* A change in our content has gone through.  Re-build our pieces. */
                setup_pieces ();
                --_suspended;
        } else if (type == CHANGE_TYPE_CANCELLED) {
                --_suspended;
        }

        Change (type, property, frequent);
}
279
/** Set the size of the container that output video will be put into.
 *  Emits PENDING before the change and DONE (or CANCELLED if the size is
 *  unchanged) afterwards; signals are emitted with _mutex released.
 */
void
Player::set_video_container_size (dcp::Size s)
{
        Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

        {
                boost::mutex::scoped_lock lm (_mutex);

                if (s == _video_container_size) {
                        /* No change; unlock before emitting so observers can call back into us */
                        lm.unlock ();
                        Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
                        return;
                }

                _video_container_size = s;

                /* Pre-build the black frame used to fill video gaps at the new size */
                _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
                _black_image->make_black ();
        }

        Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
302
303 void
304 Player::playlist_change (ChangeType type)
305 {
306         if (type == CHANGE_TYPE_DONE) {
307                 setup_pieces ();
308         }
309         Change (type, PlayerProperty::PLAYLIST, false);
310 }
311
312 void
313 Player::film_change (ChangeType type, Film::Property p)
314 {
315         /* Here we should notice Film properties that affect our output, and
316            alert listeners that our output now would be different to how it was
317            last time we were run.
318         */
319
320         if (p == Film::CONTAINER) {
321                 Change (type, PlayerProperty::FILM_CONTAINER, false);
322         } else if (p == Film::VIDEO_FRAME_RATE) {
323                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
324                    so we need new pieces here.
325                 */
326                 if (type == CHANGE_TYPE_DONE) {
327                         setup_pieces ();
328                 }
329                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
330         } else if (p == Film::AUDIO_PROCESSOR) {
331                 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
332                         boost::mutex::scoped_lock lm (_mutex);
333                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
334                 }
335         } else if (p == Film::AUDIO_CHANNELS) {
336                 if (type == CHANGE_TYPE_DONE) {
337                         boost::mutex::scoped_lock lm (_mutex);
338                         _audio_merger.clear ();
339                 }
340         }
341 }
342
343 shared_ptr<PlayerVideo>
344 Player::black_player_video_frame (Eyes eyes) const
345 {
346         return shared_ptr<PlayerVideo> (
347                 new PlayerVideo (
348                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
349                         Crop (),
350                         optional<double> (),
351                         _video_container_size,
352                         _video_container_size,
353                         eyes,
354                         PART_WHOLE,
355                         PresetColourConversion::all().front().conversion,
356                         VIDEO_RANGE_FULL,
357                         boost::weak_ptr<Content>(),
358                         boost::optional<Frame>(),
359                         false
360                 )
361         );
362 }
363
364 Frame
365 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
366 {
367         DCPTime s = t - piece->content->position ();
368         s = min (piece->content->length_after_trim(_film), s);
369         s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
370
371         /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
372            then convert that ContentTime to frames at the content's rate.  However this fails for
373            situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
374            enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
375
376            Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
377         */
378         return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
379 }
380
381 DCPTime
382 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
383 {
384         /* See comment in dcp_to_content_video */
385         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
386         return d + piece->content->position();
387 }
388
389 Frame
390 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
391 {
392         DCPTime s = t - piece->content->position ();
393         s = min (piece->content->length_after_trim(_film), s);
394         /* See notes in dcp_to_content_video */
395         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
396 }
397
398 DCPTime
399 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
400 {
401         /* See comment in dcp_to_content_video */
402         return DCPTime::from_frames (f, _film->audio_frame_rate())
403                 - DCPTime (piece->content->trim_start(), piece->frc)
404                 + piece->content->position();
405 }
406
407 ContentTime
408 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
409 {
410         DCPTime s = t - piece->content->position ();
411         s = min (piece->content->length_after_trim(_film), s);
412         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
413 }
414
415 DCPTime
416 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
417 {
418         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
419 }
420
421 list<shared_ptr<Font> >
422 Player::get_subtitle_fonts ()
423 {
424         boost::mutex::scoped_lock lm (_mutex);
425
426         list<shared_ptr<Font> > fonts;
427         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
428                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
429                         /* XXX: things may go wrong if there are duplicate font IDs
430                            with different font files.
431                         */
432                         list<shared_ptr<Font> > f = j->fonts ();
433                         copy (f.begin(), f.end(), back_inserter (fonts));
434                 }
435         }
436
437         return fonts;
438 }
439
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _ignore_video = true;
        /* Pieces are built differently depending on what we are ignoring */
        setup_pieces_unlocked ();
}
448
/** Set this player never to produce any audio data */
void
Player::set_ignore_audio ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _ignore_audio = true;
        /* Pieces are built differently depending on what we are ignoring */
        setup_pieces_unlocked ();
}
456
/** Set this player never to produce any text data */
void
Player::set_ignore_text ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _ignore_text = true;
        /* Pieces are built differently depending on what we are ignoring */
        setup_pieces_unlocked ();
}
464
/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
        boost::mutex::scoped_lock lm (_mutex);
        /* No setup_pieces() needed: this only affects how subtitles are rendered later */
        _always_burn_open_subtitles = true;
}
472
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _fast = true;
        /* Decoders are created with the fast flag, so rebuild them */
        setup_pieces_unlocked ();
}
481
/** Set the player to decode content that is referenced by DCPs, rather than skipping it */
void
Player::set_play_referenced ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _play_referenced = true;
        /* DCP decoders need to be told to decode referenced material */
        setup_pieces_unlocked ();
}
489
490 static void
491 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
492 {
493         DCPOMATIC_ASSERT (r);
494         r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
495         r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
496         if (r->actual_duration() > 0) {
497                 a.push_back (
498                         ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
499                         );
500         }
501 }
502
/** @return reel assets from DCP content in the playlist which is marked to
 *  refer to (rather than re-encode) its video/audio/text, trimmed to the
 *  content's trim settings and positioned in DCP time.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
        /* Does not require a lock on _mutex as it's only called from DCPEncoder */

        list<ReferencedReelAsset> a;

        BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
                shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
                if (!j) {
                        /* Only DCP content can be referenced */
                        continue;
                }

                scoped_ptr<DCPDecoder> decoder;
                try {
                        decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
                } catch (...) {
                        /* NOTE(review): this returns whatever we have gathered so far rather
                           than skipping just this piece of content — confirm that is intended.
                        */
                        return a;
                }

                DCPOMATIC_ASSERT (j->video_frame_rate ());
                double const cfr = j->video_frame_rate().get();
                /* Trims expressed in content frames */
                Frame const trim_start = j->trim_start().frames_round (cfr);
                Frame const trim_end = j->trim_end().frames_round (cfr);
                int const ffr = _film->video_frame_rate ();

                /* position in the asset from the start */
                int64_t offset_from_start = 0;
                /* position in the asset from the end */
                int64_t offset_from_end = 0;
                BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
                        /* Assume that main picture duration is the length of the reel */
                        offset_from_end += k->main_picture()->actual_duration();
                }

                BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

                        /* Assume that main picture duration is the length of the reel */
                        int64_t const reel_duration = k->main_picture()->actual_duration();

                        /* See doc/design/trim_reels.svg */
                        Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
                        Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

                        /* DCP time at which this reel starts */
                        DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
                        if (j->reference_video ()) {
                                maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
                        }

                        if (j->reference_audio ()) {
                                maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
                        }

                        if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
                                maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
                        }

                        if (j->reference_text (TEXT_CLOSED_CAPTION)) {
                                BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
                                        maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
                                }
                        }

                        offset_from_start += reel_duration;
                        offset_from_end -= reel_duration;
                }
        }

        return a;
}
573
/** Ask the most-behind decoder (or black/silence filler) to emit some data,
 *  then push any ready audio out through the merger.
 *  @return true if playback has finished (or a suspended no-op frame was emitted).
 */
bool
Player::pass ()
{
        boost::mutex::scoped_lock lm (_mutex);

        if (_suspended) {
                /* We can't pass in this state */
                return false;
        }

        if (_playback_length == DCPTime()) {
                /* Special; just give one black frame */
                emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
                return true;
        }

        /* Find the decoder or empty which is farthest behind where we are and make it emit some data */

        shared_ptr<Piece> earliest_content;
        optional<DCPTime> earliest_time;

        BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
                if (i->done) {
                        continue;
                }

                DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
                if (t > i->content->end(_film)) {
                        /* This piece's decoder has gone past the end of its content */
                        i->done = true;
                } else {

                        /* Given two choices at the same time, pick the one with texts so we see it before
                           the video.
                        */
                        if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
                                earliest_time = t;
                                earliest_content = i;
                        }
                }
        }

        bool done = false;

        /* What kind of thing should emit next: real content, black filler,
           silence filler, or nothing (i.e. we are finished).
        */
        enum {
                NONE,
                CONTENT,
                BLACK,
                SILENT
        } which = NONE;

        if (earliest_content) {
                which = CONTENT;
        }

        if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
                earliest_time = _black.position ();
                which = BLACK;
        }

        if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
                earliest_time = _silent.position ();
                which = SILENT;
        }

        switch (which) {
        case CONTENT:
        {
                earliest_content->done = earliest_content->decoder->pass ();
                shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
                if (dcp && !_play_referenced && dcp->reference_audio()) {
                        /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
                           to `hide' the fact that no audio was emitted during the referenced DCP (though
                           we need to behave as though it was).
                        */
                        _last_audio_time = dcp->end (_film);
                }
                break;
        }
        case BLACK:
                emit_video (black_player_video_frame(EYES_BOTH), _black.position());
                _black.set_position (_black.position() + one_video_frame());
                break;
        case SILENT:
        {
                DCPTimePeriod period (_silent.period_at_position());
                if (_last_audio_time) {
                        /* Sometimes the thing that happened last finishes fractionally before
                           or after this silence.  Bodge the start time of the silence to fix it.
                           I think this is nothing to worry about since we will just add or
                           remove a little silence at the end of some content.
                        */
                        int64_t const error = labs(period.from.get() - _last_audio_time->get());
                        /* Let's not worry about less than a frame at 24fps */
                        int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
                        if (error >= too_much_error) {
                                _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
                        }
                        DCPOMATIC_ASSERT (error < too_much_error);
                        period.from = *_last_audio_time;
                }
                /* Emit at most one video frame's worth of silence per pass */
                if (period.duration() > one_video_frame()) {
                        period.to = period.from + one_video_frame();
                }
                fill_audio (period);
                _silent.set_position (period.to);
                break;
        }
        case NONE:
                done = true;
                break;
        }

        /* Emit any audio that is ready */

        /* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
           of our streams, or the position of the _silent.
        */
        DCPTime pull_to = _playback_length;
        for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
                if (!i->second.piece->done && i->second.last_push_end < pull_to) {
                        pull_to = i->second.last_push_end;
                }
        }
        if (!_silent.done() && _silent.position() < pull_to) {
                pull_to = _silent.position();
        }

        list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
        for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
                if (_last_audio_time && i->second < *_last_audio_time) {
                        /* This new data comes before the last we emitted (or the last seek); discard it */
                        pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
                        if (!cut.first) {
                                continue;
                        }
                        *i = cut;
                } else if (_last_audio_time && i->second > *_last_audio_time) {
                        /* There's a gap between this data and the last we emitted; fill with silence */
                        fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
                }

                emit_audio (i->first, i->second);
        }

        if (done) {
                /* Flush out anything still held by the 3D shuffler and the delay line */
                _shuffler->flush ();
                for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
                        do_emit_video(i->first, i->second);
                }
        }

        return done;
}
727
/** @return Open subtitles for the frame at the given time, converted to images */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
        list<PositionImage> captions;
        int const vfr = _film->video_frame_rate();

        /* Gather all open subtitles active during this one-frame period which
           should be burnt in (or all of them, if _always_burn_open_subtitles).
        */
        BOOST_FOREACH (
                PlayerText j,
                _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
                ) {

                /* Bitmap subtitles */
                BOOST_FOREACH (BitmapText i, j.bitmap) {
                        if (!i.image) {
                                continue;
                        }

                        /* i.image will already have been scaled to fit _video_container_size */
                        dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

                        /* Convert the proportional rectangle position to pixels */
                        captions.push_back (
                                PositionImage (
                                        i.image,
                                        Position<int> (
                                                lrint (_video_container_size.width * i.rectangle.x),
                                                lrint (_video_container_size.height * i.rectangle.y)
                                                )
                                        )
                                );
                }

                /* String subtitles (rendered to an image) */
                if (!j.string.empty ()) {
                        list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
                        copy (s.begin(), s.end(), back_inserter (captions));
                }
        }

        if (captions.empty ()) {
                /* Nothing to burn in for this frame */
                return optional<PositionImage> ();
        }

        return merge (captions);
}
773
/** Handle a frame of video arriving from a piece's decoder.
 *  @param wp Piece which the video comes from.
 *  @param video The video frame, with its frame index within the content.
 *
 *  Emits the frame (and any black/repeated frames needed to fill gaps before it)
 *  via emit_video().
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	FrameRateChange frc (_film, piece->content);
	/* If the frame-rate change requires frames to be skipped, drop every other one */
	if (frc.skip && (video.frame % 2) == 1) {
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* Last frame we emitted for this piece, if any; repeating it looks
			   better than emitting black.
			*/
			LastVideoMap::const_iterator last = _last_video.find (wp);
			if (_film->three_d()) {
				/* In 3D we fill eye-by-eye, so work out which eye to stop at */
				Eyes fill_to_eyes = video.eyes;
				if (fill_to_eyes == EYES_BOTH) {
					fill_to_eyes = EYES_LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = EYES_LEFT;
				}
				DCPTime j = fill_from;
				Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
				if (eyes == EYES_BOTH) {
					eyes = EYES_LEFT;
				}
				/* Emit left/right pairs until we reach both the fill-to time and eye */
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						emit_video (black_player_video_frame(eyes), j);
					}
					/* Time only advances once both eyes of a frame have gone out */
					if (eyes == EYES_RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(EYES_BOTH), j);
					}
				}
			}
		}
	}

	/* Remember this frame (per piece) so that it can be repeated to fill future gaps */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (_film, video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content->video->range(),
			piece->content,
			video.frame,
			false
			)
		);

	/* Emit the frame, repeated as many times as the frame-rate change requires,
	   but never beyond the end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
875
/** Handle a block of audio arriving from a piece's decoder.
 *  @param wp Piece which the audio comes from.
 *  @param stream Stream within the piece's content which produced the audio.
 *  @param content_audio The audio itself, with its frame position within the content.
 *
 *  Trims the block to the content's period, applies gain, remaps channels,
 *  runs any audio processor and pushes the result into the merger.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Frame rate of this content after resampling for the DCP */
	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* The block overlaps the end of the content; keep only the part inside */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy the buffers before applying gain so we don't modify the decoder's data */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap from the stream's channel layout to the DCP's */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process (e.g. by a configured audio processor, if there is one) */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push into the merger, and note how far this stream has got */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
942
943 void
944 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
945 {
946         shared_ptr<Piece> piece = wp.lock ();
947         shared_ptr<const TextContent> text = wc.lock ();
948         if (!piece || !text) {
949                 return;
950         }
951
952         /* Apply content's subtitle offsets */
953         subtitle.sub.rectangle.x += text->x_offset ();
954         subtitle.sub.rectangle.y += text->y_offset ();
955
956         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
957         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
958         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
959
960         /* Apply content's subtitle scale */
961         subtitle.sub.rectangle.width *= text->x_scale ();
962         subtitle.sub.rectangle.height *= text->y_scale ();
963
964         PlayerText ps;
965         shared_ptr<Image> image = subtitle.sub.image;
966
967         /* We will scale the subtitle up to fit _video_container_size */
968         int const width = subtitle.sub.rectangle.width * _video_container_size.width;
969         int const height = subtitle.sub.rectangle.height * _video_container_size.height;
970         if (width == 0 || height == 0) {
971                 return;
972         }
973
974         dcp::Size scaled_size (width, height);
975         ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
976         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
977
978         _active_texts[text->type()].add_from (wc, ps, from);
979 }
980
/** Handle a string subtitle starting, as reported by a piece's decoder.
 *  @param wp Piece which the subtitle comes from.
 *  @param wc TextContent which the subtitle belongs to.
 *  @param subtitle The subtitle strings, with their position within the content.
 */
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
		/* The content has gone; nothing to do */
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	if (from > piece->content->end(_film)) {
		/* Starts after the end of the content's period; ignore it */
		return;
	}

	/* Deliberately copy each SubtitleString, as we modify it below */
	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		/* Apply the content's configured offsets */
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we scale size by 2.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* `in' time in the DCP, with millisecond precision */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[text->type()].add_from (wc, ps, from);
}
1024
1025 void
1026 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1027 {
1028         shared_ptr<const TextContent> text = wc.lock ();
1029         if (!text) {
1030                 return;
1031         }
1032
1033         if (!_active_texts[text->type()].have(wc)) {
1034                 return;
1035         }
1036
1037         shared_ptr<Piece> piece = wp.lock ();
1038         if (!piece) {
1039                 return;
1040         }
1041
1042         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1043
1044         if (dcp_to > piece->content->end(_film)) {
1045                 return;
1046         }
1047
1048         pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1049
1050         bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1051         if (text->use() && !always && !text->burn()) {
1052                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1053         }
1054 }
1055
/** Seek the player.
 *  @param time DCP time to seek to.
 *  @param accurate true to seek the decoders exactly to @param time; false to allow
 *  them to seek approximately (which is typically faster).
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	if (_shuffler) {
		_shuffler->clear ();
	}

	/* Throw away any video frames queued for emission */
	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	/* Discard buffered audio and any in-flight subtitles */
	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();
	}

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* We know exactly where we are, so anything arriving before `time' can be discarded */
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	} else {
		/* We don't know exactly where the decoders will resume, so don't claim to */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget remembered frames so gaps are not filled with pre-seek video */
	_last_video.clear ();
}
1115
1116 void
1117 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1118 {
1119         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1120            player before the video that requires them.
1121         */
1122         _delay.push_back (make_pair (pv, time));
1123
1124         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1125                 _last_video_time = time + one_video_frame();
1126         }
1127         _last_video_eyes = increment_eyes (pv->eyes());
1128
1129         if (_delay.size() < 3) {
1130                 return;
1131         }
1132
1133         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1134         _delay.pop_front();
1135         do_emit_video (to_do.first, to_do.second);
1136 }
1137
1138 void
1139 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1140 {
1141         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1142                 for (int i = 0; i < TEXT_COUNT; ++i) {
1143                         _active_texts[i].clear_before (time);
1144                 }
1145         }
1146
1147         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1148         if (subtitles) {
1149                 pv->set_text (subtitles.get ());
1150         }
1151
1152         Video (pv, time);
1153 }
1154
1155 void
1156 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1157 {
1158         /* Log if the assert below is about to fail */
1159         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1160                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1161         }
1162
1163         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1164         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1165         Audio (data, time, _film->audio_frame_rate());
1166         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1167 }
1168
1169 void
1170 Player::fill_audio (DCPTimePeriod period)
1171 {
1172         if (period.from == period.to) {
1173                 return;
1174         }
1175
1176         DCPOMATIC_ASSERT (period.from < period.to);
1177
1178         DCPTime t = period.from;
1179         while (t < period.to) {
1180                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1181                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1182                 if (samples) {
1183                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1184                         silence->make_silent ();
1185                         emit_audio (silence, t);
1186                 }
1187                 t += block;
1188         }
1189 }
1190
/** @return The duration of one frame of video at the film's video frame rate */
DCPTime
Player::one_video_frame () const
{
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}
1196
1197 pair<shared_ptr<AudioBuffers>, DCPTime>
1198 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1199 {
1200         DCPTime const discard_time = discard_to - time;
1201         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1202         Frame remaining_frames = audio->frames() - discard_frames;
1203         if (remaining_frames <= 0) {
1204                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1205         }
1206         shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1207         return make_pair(cut, time + discard_time);
1208 }
1209
/** Set the decode resolution reduction to use for DCP content.
 *  @param reduction Reduction to use, or none for full resolution.
 *
 *  Emits Change with CHANGE_TYPE_PENDING, then either CHANGE_TYPE_CANCELLED
 *  (if the value is unchanged) or CHANGE_TYPE_DONE after the pieces have been
 *  rebuilt.
 */
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change; release the lock before emitting the signal so that
			   observers can safely call back into the player.
			*/
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		/* Rebuild the pieces so the new reduction takes effect; we already hold the lock */
		setup_pieces_unlocked ();
	}

	Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1230
1231 optional<DCPTime>
1232 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1233 {
1234         boost::mutex::scoped_lock lm (_mutex);
1235
1236         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1237                 if (i->content == content) {
1238                         return content_time_to_dcp (i, t);
1239                 }
1240         }
1241
1242         /* We couldn't find this content; perhaps things are being changed over */
1243         return optional<DCPTime>();
1244 }
1245
1246
/** @return The playlist that this player is using */
shared_ptr<const Playlist>
Player::playlist () const
{
	return _playlist;
}
1252