macOS / new boost build fixes.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "atmos_decoder.h"
22 #include "player.h"
23 #include "film.h"
24 #include "audio_buffers.h"
25 #include "content_audio.h"
26 #include "dcp_content.h"
27 #include "dcpomatic_log.h"
28 #include "job.h"
29 #include "image.h"
30 #include "raw_image_proxy.h"
31 #include "ratio.h"
32 #include "log.h"
33 #include "render_text.h"
34 #include "config.h"
35 #include "content_video.h"
36 #include "player_video.h"
37 #include "frame_rate_change.h"
38 #include "audio_processor.h"
39 #include "playlist.h"
40 #include "referenced_reel_asset.h"
41 #include "decoder_factory.h"
42 #include "decoder.h"
43 #include "video_decoder.h"
44 #include "audio_decoder.h"
45 #include "text_content.h"
46 #include "text_decoder.h"
47 #include "ffmpeg_content.h"
48 #include "audio_content.h"
49 #include "dcp_decoder.h"
50 #include "image_decoder.h"
51 #include "compose.hpp"
52 #include "shuffler.h"
53 #include "timer.h"
54 #include <dcp/reel.h>
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <boost/foreach.hpp>
60 #include <stdint.h>
61 #include <algorithm>
62 #include <iostream>
63
64 #include "i18n.h"
65
66 using std::list;
67 using std::cout;
68 using std::min;
69 using std::max;
70 using std::min;
71 using std::vector;
72 using std::pair;
73 using std::map;
74 using std::make_pair;
75 using std::copy;
76 using boost::shared_ptr;
77 using boost::weak_ptr;
78 using boost::dynamic_pointer_cast;
79 using boost::optional;
80 using boost::scoped_ptr;
81 #if BOOST_VERSION >= 106100
82 using namespace boost::placeholders;
83 #endif
84 using namespace dcpomatic;
85
/* Identifiers emitted with the Change signal to say which aspect of the Player's
   output has (or is about to) change.  Numbered from 700 to keep them distinct
   from other property-change identifiers used elsewhere.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
92
/** Construct a Player which plays the film's own playlist.
 *  All ignore/burn/fast/play-referenced flags default to off; the audio merger
 *  is created at the film's audio sample rate.  _shuffler is a raw pointer
 *  owned by this Player (created in setup_pieces_unlocked(), freed in the
 *  destructor).
 */
Player::Player (shared_ptr<const Film> film)
	: _film (film)
	, _suspended (0)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _tolerant (film->tolerant())
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	construct ();
}
108
/** Construct a Player which plays a given playlist rather than the film's own.
 *  @param film Film that supplies global parameters (frame rate, audio rate etc.).
 *  @param playlist_ Playlist whose content is to be played; stored in _playlist
 *  so that playlist() and length calculations use it instead of the film's.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _tolerant (film->tolerant())
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	construct ();
}
125
/** Shared second-stage construction for both constructors: connect to the film
 *  and playlist change signals, build the initial pieces and seek to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pretend the audio processor setting just changed so that _audio_processor
	   is created if the film has one.
	*/
	film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);

	setup_pieces ();
	/* Accurate seek to the very start so decoders are positioned ready for pass() */
	seek (DCPTime (), true);
}
142
Player::~Player ()
{
	/* _shuffler is a raw pointer owned by this Player (allocated in
	   setup_pieces_unlocked()); deleting the null initial value is safe.
	*/
	delete _shuffler;
}
147
/** Thread-safe wrapper: take _mutex and rebuild the pieces list */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
154
155
156 bool
157 have_video (shared_ptr<const Content> content)
158 {
159         return static_cast<bool>(content->video) && content->video->use();
160 }
161
162 bool
163 have_audio (shared_ptr<const Content> content)
164 {
165         return static_cast<bool>(content->audio);
166 }
167
/** Rebuild _pieces from the playlist's current content: create (or reuse) a
 *  decoder for each content item, wire its output signals into the Player,
 *  and reset playback state (stream states, black/silence fillers, last-time
 *  markers).  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so their decoders can be reused below */
	list<shared_ptr<Piece> > old_pieces = _pieces;
	_pieces.clear ();

	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {

		if (!i->paths_valid ()) {
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* If this content was in the old pieces list, pass its decoder to the
		   factory so decoding state can be carried over.
		*/
		shared_ptr<Decoder> old_decoder;
		BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}

		shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
				i->set_ignore (true);
			}
		}

		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Route each text decoder's start/stop events into the Player, tagging
		   them with weak pointers so stale emissions after a rebuild are ignored.
		*/
		list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	/* Record, for each audio stream, which piece it belongs to and where its
	   pushed audio ends (initially the content's start position).
	*/
	_stream_states.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Trackers for the periods which no content fills with video / audio */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();
}
279
280 void
281 Player::playlist_content_change (ChangeType type, int property, bool frequent)
282 {
283         if (type == CHANGE_TYPE_PENDING) {
284                 /* The player content is probably about to change, so we can't carry on
285                    until that has happened and we've rebuilt our pieces.  Stop pass()
286                    and seek() from working until then.
287                 */
288                 ++_suspended;
289         } else if (type == CHANGE_TYPE_DONE) {
290                 /* A change in our content has gone through.  Re-build our pieces. */
291                 setup_pieces ();
292                 --_suspended;
293         } else if (type == CHANGE_TYPE_CANCELLED) {
294                 --_suspended;
295         }
296
297         Change (type, property, frequent);
298 }
299
/** Set the size of the "container" into which video will be scaled, rebuilding
 *  the black frame image to match.  Emits PENDING then DONE (or CANCELLED if
 *  the size is unchanged) on the Change signal.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* Unlock before emitting, as Change handlers may call back into us */
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	/* DONE is emitted outside the lock for the same reason */
	Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
322
323 void
324 Player::playlist_change (ChangeType type)
325 {
326         if (type == CHANGE_TYPE_DONE) {
327                 setup_pieces ();
328         }
329         Change (type, PlayerProperty::PLAYLIST, false);
330 }
331
332 void
333 Player::film_change (ChangeType type, Film::Property p)
334 {
335         /* Here we should notice Film properties that affect our output, and
336            alert listeners that our output now would be different to how it was
337            last time we were run.
338         */
339
340         if (p == Film::CONTAINER) {
341                 Change (type, PlayerProperty::FILM_CONTAINER, false);
342         } else if (p == Film::VIDEO_FRAME_RATE) {
343                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
344                    so we need new pieces here.
345                 */
346                 if (type == CHANGE_TYPE_DONE) {
347                         setup_pieces ();
348                 }
349                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
350         } else if (p == Film::AUDIO_PROCESSOR) {
351                 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
352                         boost::mutex::scoped_lock lm (_mutex);
353                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
354                 }
355         } else if (p == Film::AUDIO_CHANNELS) {
356                 if (type == CHANGE_TYPE_DONE) {
357                         boost::mutex::scoped_lock lm (_mutex);
358                         _audio_merger.clear ();
359                 }
360         }
361 }
362
/** @return a PlayerVideo wrapping our pre-made black image, sized to the video
 *  container, for the given @p eyes (used to fill gaps in video).
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return shared_ptr<PlayerVideo> (
		new PlayerVideo (
			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
			Crop (),
			optional<double> (),
			_video_container_size,
			_video_container_size,
			eyes,
			PART_WHOLE,
			PresetColourConversion::all().front().conversion,
			VIDEO_RANGE_FULL,
			boost::weak_ptr<Content>(),
			boost::optional<Frame>(),
			false
		)
	);
}
383
/** Convert a DCP time to a frame index within @p piece's video content,
 *  clamping to the content's trimmed extent.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
400
401 DCPTime
402 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
403 {
404         /* See comment in dcp_to_content_video */
405         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
406         return d + piece->content->position();
407 }
408
409 Frame
410 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
411 {
412         DCPTime s = t - piece->content->position ();
413         s = min (piece->content->length_after_trim(_film), s);
414         /* See notes in dcp_to_content_video */
415         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
416 }
417
418 DCPTime
419 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
420 {
421         /* See comment in dcp_to_content_video */
422         return DCPTime::from_frames (f, _film->audio_frame_rate())
423                 - DCPTime (piece->content->trim_start(), piece->frc)
424                 + piece->content->position();
425 }
426
427 ContentTime
428 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
429 {
430         DCPTime s = t - piece->content->position ();
431         s = min (piece->content->length_after_trim(_film), s);
432         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
433 }
434
435 DCPTime
436 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
437 {
438         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
439 }
440
441 list<shared_ptr<Font> >
442 Player::get_subtitle_fonts ()
443 {
444         boost::mutex::scoped_lock lm (_mutex);
445
446         list<shared_ptr<Font> > fonts;
447         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
448                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
449                         /* XXX: things may go wrong if there are duplicate font IDs
450                            with different font files.
451                         */
452                         list<shared_ptr<Font> > f = j->fonts ();
453                         copy (f.begin(), f.end(), back_inserter (fonts));
454                 }
455         }
456
457         return fonts;
458 }
459
/** Set this player never to produce any video data.  Rebuilds pieces so the
 *  flag takes effect on the decoders.
 */
void
Player::set_ignore_video ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_video = true;
	setup_pieces_unlocked ();
}
468
/** Set this player never to produce any audio data.  Rebuilds pieces so the
 *  flag takes effect on the decoders.
 */
void
Player::set_ignore_audio ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_audio = true;
	setup_pieces_unlocked ();
}
476
/** Set this player never to produce any text (subtitle/caption) data.
 *  Rebuilds pieces so the flag takes effect on the decoders.
 */
void
Player::set_ignore_text ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_text = true;
	setup_pieces_unlocked ();
}
484
/** Set the player to always burn open texts into the image regardless of the content settings.
 *  No piece rebuild is needed: the flag is consulted when frames are produced.
 */
void
Player::set_always_burn_open_subtitles ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_always_burn_open_subtitles = true;
}
492
/** Sets up the player to be faster, possibly at the expense of quality.
 *  Rebuilds pieces so decoders are re-created with the fast flag.
 */
void
Player::set_fast ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_fast = true;
	setup_pieces_unlocked ();
}
501
/** Make the player decode and play content from referenced DCPs rather than
 *  skipping it.  Rebuilds pieces so DCP decoders pick up the flag.
 */
void
Player::set_play_referenced ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	setup_pieces_unlocked ();
}
509
/** Apply reel-level trims to a reel asset (mutating it in place) and, if any
 *  duration remains, add it to @p a with the DCP period it will occupy.
 *  @param a List to add to.
 *  @param r Asset to consider (must not be null).
 *  @param reel_trim_start Frames to trim from the start of this reel.
 *  @param reel_trim_end Frames to trim from the end of this reel.
 *  @param from Position of this reel in the DCP.
 *  @param ffr Film video frame rate.
 */
static void
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
{
	DCPOMATIC_ASSERT (r);
	/* Assets with no explicit entry point are treated as starting at 0 */
	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
	if (r->actual_duration() > 0) {
		a.push_back (
			ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
			);
	}
}
522
/** @return the assets from any DCP content which is marked to be referenced
 *  (rather than re-encoded), together with the DCP periods they will occupy,
 *  with the content's trims distributed across its reels.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* If the DCP can't be opened, give up on referencing entirely */
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* Work out how much of the content's start/end trim falls within
			   this particular reel.  See doc/design/trim_reels.svg
			*/
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_CLOSED_CAPTION)) {
				BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
593
/** Ask the most-behind source (a content decoder, or the black/silence
 *  fillers) to emit some data, then push any ready audio out of the merger.
 *  @return true when there is nothing left to emit (playback is finished),
 *  false otherwise (including when suspended by a pending content change).
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What kind of source should emit next */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* A black or silent gap earlier than any content takes priority */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
		earliest_content->done = earliest_content->decoder->pass ();
		shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _playback_length;
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Playback has finished: flush any out-of-order 3D frames and the delay line */
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
752
/** @return Open subtitles for the frame at the given time, converted to images,
 *  merged into a single positioned image; or an empty optional if there are
 *  none.  Includes subtitles marked for burn-in, plus all open subtitles when
 *  _always_burn_open_subtitles is set.
 */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	BOOST_FOREACH (
		PlayerText j,
		_active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		BOOST_FOREACH (BitmapText i, j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

			/* i.rectangle gives position as a proportion of the container; convert to pixels */
			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint (_video_container_size.width * i.rectangle.x),
						lrint (_video_container_size.height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty ()) {
			list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty ()) {
		return optional<PositionImage> ();
	}

	return merge (captions);
}
798
/** Handle a video frame arriving from a decoder.  Frames before the content's
 *  period, or before the last accurate seek, are discarded; any gap since the
 *  last frame we emitted is filled with repeats of the previous frame (or with
 *  black), and then this frame is emitted, repeated if the frame-rate change
 *  requires it.
 *  @param wp Piece that the video arrived from.
 *  @param video Video frame from the decoder.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	if (!piece->content->video->use()) {
		/* This content's video is disabled */
		return;
	}

	FrameRateChange frc (_film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		/* frc.skip means we drop every other (odd-numbered) content frame */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* The last frame we emitted for this piece, if any; we repeat it to fill gaps */
			LastVideoMap::const_iterator last = _last_video.find (wp);
			if (_film->three_d()) {
				/* In 3D we must fill whole left/right pairs, stopping just before
				   the eye of the frame we are about to emit.
				*/
				Eyes fill_to_eyes = video.eyes;
				if (fill_to_eyes == EYES_BOTH) {
					fill_to_eyes = EYES_LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = EYES_LEFT;
				}
				DCPTime j = fill_from;
				Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
				if (eyes == EYES_BOTH) {
					eyes = EYES_LEFT;
				}
				/* Emit eyes alternately, advancing time after each RIGHT eye,
				   until we reach both fill_to and fill_to_eyes.
				*/
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					if (eyes == EYES_RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: one repeated (or black) frame per video frame period */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(EYES_BOTH), j);
					}
				}
			}
		}
	}

	/* Remember a PlayerVideo for this frame (with its crop/fade/scale parameters)
	   so that it can be emitted now and re-used to fill any future gap.
	*/
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (_film, video.frame),
			scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content->video->range(),
			piece->content,
			video.frame,
			false
			)
		);

	/* Emit the frame, repeated as the frame-rate change requires, but never
	   beyond the end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
905
/** Handle some audio arriving from a decoder: trim it to the content's period,
 *  apply gain, remap channels, run any audio processor and push the result to
 *  the merger.
 *  @param wp Piece that the audio arrived from.
 *  @param stream Stream within the content that the audio belongs to.
 *  @param content_audio Audio data and its frame number within the content.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Frame rate of this content after resampling for the DCP */
	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* The block overlaps the end of the content; keep only the part before the end */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy first so that we don't modify the decoder's buffers */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	/* Record where this stream's pushed audio now ends */
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
974
975 void
976 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
977 {
978         shared_ptr<Piece> piece = wp.lock ();
979         shared_ptr<const TextContent> text = wc.lock ();
980         if (!piece || !text) {
981                 return;
982         }
983
984         /* Apply content's subtitle offsets */
985         subtitle.sub.rectangle.x += text->x_offset ();
986         subtitle.sub.rectangle.y += text->y_offset ();
987
988         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
989         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
990         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
991
992         /* Apply content's subtitle scale */
993         subtitle.sub.rectangle.width *= text->x_scale ();
994         subtitle.sub.rectangle.height *= text->y_scale ();
995
996         PlayerText ps;
997         shared_ptr<Image> image = subtitle.sub.image;
998
999         /* We will scale the subtitle up to fit _video_container_size */
1000         int const width = subtitle.sub.rectangle.width * _video_container_size.width;
1001         int const height = subtitle.sub.rectangle.height * _video_container_size.height;
1002         if (width == 0 || height == 0) {
1003                 return;
1004         }
1005
1006         dcp::Size scaled_size (width, height);
1007         ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
1008         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
1009
1010         _active_texts[text->type()].add_from (wc, ps, from);
1011 }
1012
/** Handle the start of a string (plain-text) subtitle arriving from a decoder:
 *  apply the content's offset/scale settings to each string and store the
 *  result as an active text.
 *  @param wp Piece that the subtitle arrived from.
 *  @param wc TextContent that the subtitle belongs to.
 *  @param subtitle Subtitle strings and the time they start at.
 */
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	if (from > piece->content->end(_film)) {
		/* Subtitle starts after the content's period; ignore it */
		return;
	}

	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		/* Apply the content's subtitle offsets */
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling:
		   size *= 1 / min (1 / xs, 1 / ys) is size *= max (xs, ys),
		   so e.g. if xs = ys = 0.5 the size is halved.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* Start time, as a dcp::Time with a tick rate of 1000 */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[text->type()].add_from (wc, ps, from);
}
1056
1057 void
1058 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1059 {
1060         shared_ptr<const TextContent> text = wc.lock ();
1061         if (!text) {
1062                 return;
1063         }
1064
1065         if (!_active_texts[text->type()].have(wc)) {
1066                 return;
1067         }
1068
1069         shared_ptr<Piece> piece = wp.lock ();
1070         if (!piece) {
1071                 return;
1072         }
1073
1074         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1075
1076         if (dcp_to > piece->content->end(_film)) {
1077                 return;
1078         }
1079
1080         pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1081
1082         bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1083         if (text->use() && !always && !text->burn()) {
1084                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1085         }
1086 }
1087
/** Seek the player to a given time.
 *  @param time DCP time to seek to.
 *  @param accurate true to require frames starting exactly at the given time
 *  (earlier ones will be discarded); false if starting somewhat before it is
 *  acceptable.
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	/* Throw away everything we have buffered: pending 3D frames, delayed
	   video, processor state, merged audio and active texts.
	*/
	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();
	}

	/* Seek each piece's decoder as appropriate for where it sits relative to `time' */
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* We know exactly where we will be, so anything arriving before these
		   times can be discarded (see video()).
		*/
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	} else {
		/* We can't say exactly where the decoders will resume */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget the frames we were using to fill gaps */
	_last_video.clear ();
}
1148
1149 void
1150 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1151 {
1152         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1153            player before the video that requires them.
1154         */
1155         _delay.push_back (make_pair (pv, time));
1156
1157         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1158                 _last_video_time = time + one_video_frame();
1159         }
1160         _last_video_eyes = increment_eyes (pv->eyes());
1161
1162         if (_delay.size() < 3) {
1163                 return;
1164         }
1165
1166         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1167         _delay.pop_front();
1168         do_emit_video (to_do.first, to_do.second);
1169 }
1170
1171 void
1172 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1173 {
1174         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1175                 for (int i = 0; i < TEXT_COUNT; ++i) {
1176                         _active_texts[i].clear_before (time);
1177                 }
1178         }
1179
1180         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1181         if (subtitles) {
1182                 pv->set_text (subtitles.get ());
1183         }
1184
1185         Video (pv, time);
1186 }
1187
1188 void
1189 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1190 {
1191         /* Log if the assert below is about to fail */
1192         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1193                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1194         }
1195
1196         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1197         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1198         Audio (data, time, _film->audio_frame_rate());
1199         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1200 }
1201
1202 void
1203 Player::fill_audio (DCPTimePeriod period)
1204 {
1205         if (period.from == period.to) {
1206                 return;
1207         }
1208
1209         DCPOMATIC_ASSERT (period.from < period.to);
1210
1211         DCPTime t = period.from;
1212         while (t < period.to) {
1213                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1214                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1215                 if (samples) {
1216                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1217                         silence->make_silent ();
1218                         emit_audio (silence, t);
1219                 }
1220                 t += block;
1221         }
1222 }
1223
1224 DCPTime
1225 Player::one_video_frame () const
1226 {
1227         return DCPTime::from_frames (1, _film->video_frame_rate ());
1228 }
1229
1230 pair<shared_ptr<AudioBuffers>, DCPTime>
1231 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1232 {
1233         DCPTime const discard_time = discard_to - time;
1234         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1235         Frame remaining_frames = audio->frames() - discard_frames;
1236         if (remaining_frames <= 0) {
1237                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1238         }
1239         shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1240         return make_pair(cut, time + discard_time);
1241 }
1242
/** Set the reduction in resolution to use when decoding DCP content, emitting
 *  the usual PENDING then DONE (or CANCELLED) change signals around the update.
 *  @param reduction New reduction, or empty for none.
 */
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	/* Announce that the property is about to change */
	Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change: release the lock before emitting, then cancel */
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		/* Rebuild our pieces with the new setting */
		setup_pieces_unlocked ();
	}

	Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1263
1264 optional<DCPTime>
1265 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1266 {
1267         boost::mutex::scoped_lock lm (_mutex);
1268
1269         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1270                 if (i->content == content) {
1271                         return content_time_to_dcp (i, t);
1272                 }
1273         }
1274
1275         /* We couldn't find this content; perhaps things are being changed over */
1276         return optional<DCPTime>();
1277 }
1278
1279
1280 shared_ptr<const Playlist>
1281 Player::playlist () const
1282 {
1283         return _playlist ? _playlist : _film->playlist();
1284 }
1285
1286
1287 void
1288 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1289 {
1290         Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
1291 }
1292