Ignore incoming data when suspended.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "player.h"
24 #include "film.h"
25 #include "audio_buffers.h"
26 #include "content_audio.h"
27 #include "dcp_content.h"
28 #include "dcpomatic_log.h"
29 #include "job.h"
30 #include "image.h"
31 #include "raw_image_proxy.h"
32 #include "ratio.h"
33 #include "log.h"
34 #include "render_text.h"
35 #include "config.h"
36 #include "content_video.h"
37 #include "player_video.h"
38 #include "frame_rate_change.h"
39 #include "audio_processor.h"
40 #include "playlist.h"
41 #include "referenced_reel_asset.h"
42 #include "decoder_factory.h"
43 #include "decoder.h"
44 #include "video_decoder.h"
45 #include "audio_decoder.h"
46 #include "text_content.h"
47 #include "text_decoder.h"
48 #include "ffmpeg_content.h"
49 #include "audio_content.h"
50 #include "dcp_decoder.h"
51 #include "image_decoder.h"
52 #include "compose.hpp"
53 #include "shuffler.h"
54 #include "timer.h"
55 #include <dcp/reel.h>
56 #include <dcp/reel_sound_asset.h>
57 #include <dcp/reel_subtitle_asset.h>
58 #include <dcp/reel_picture_asset.h>
59 #include <dcp/reel_closed_caption_asset.h>
60 #include <stdint.h>
61 #include <algorithm>
62 #include <iostream>
63
64 #include "i18n.h"
65
66
67 using std::copy;
68 using std::cout;
69 using std::dynamic_pointer_cast;
70 using std::list;
71 using std::make_pair;
72 using std::make_shared;
73 using std::map;
74 using std::max;
75 using std::min;
76 using std::min;
77 using std::pair;
78 using std::shared_ptr;
79 using std::vector;
80 using std::weak_ptr;
81 using std::make_shared;
82 using boost::optional;
83 using boost::scoped_ptr;
84 #if BOOST_VERSION >= 106100
85 using namespace boost::placeholders;
86 #endif
87 using namespace dcpomatic;
88
89
/* Identifiers for properties of the Player, used with the Change signal so that
   observers can tell what aspect of the player's output has (or is about to) change.
   Values are arbitrary but unique; they occupy a different range to Film/Content
   property identifiers so they can share a signal without colliding.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
97
/** Construct a Player which plays the whole of a Film's own playlist.
 *  @param film Film to play; also supplies the audio frame rate for the merger
 *  and the tolerant flag for decoders.
 */
Player::Player (shared_ptr<const Film> film)
	: _film (film)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
106
107
/** Construct a Player which plays an explicitly-given playlist rather than
 *  the Film's own one.
 *  @param film Film supplying global parameters (audio rate, tolerance etc.).
 *  @param playlist_ Playlist to play instead of film->playlist().
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
117
118
/** Shared constructor body: wire up change signals from the Film and the
 *  playlist, size the container, build the initial pieces and seek to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up the Film's audio processor (if any) as if it had just been set */
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}
135
136
Player::~Player ()
{
	/* _shuffler is a raw owning pointer (re-created in setup_pieces_unlocked) */
	delete _shuffler;
}
141
142
/** Thread-safe wrapper: take _mutex then rebuild the pieces. */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
149
150
151 bool
152 have_video (shared_ptr<const Content> content)
153 {
154         return static_cast<bool>(content->video) && content->video->use();
155 }
156
157
158 bool
159 have_audio (shared_ptr<const Content> content)
160 {
161         return static_cast<bool>(content->audio);
162 }
163
164
/** Rebuild the list of Pieces (content + decoder pairs) from the playlist,
 *  re-using decoders from the previous set where the content is unchanged.
 *  Also rebuilds the audio stream state map, the black/silence "Empty" gap
 *  trackers and resets the last-emitted-time bookkeeping.
 *  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so that we can re-use their decoders below */
	auto old_pieces = _pieces;
	_pieces.clear ();

	/* Replace the shuffler wholesale; its queued state refers to old pieces */
	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	for (auto i: playlist()->content()) {

		if (!i->paths_valid ()) {
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Re-use the decoder from the old pieces if this content already had one */
		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}

		auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			/* NOTE(review): this loop variable shadows the outer content `i`;
			   it iterates the decoder's text decoders, not content. */
			for (auto i: decoder->text) {
				i->set_ignore (true);
			}
		}

		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(i, decoder, frc);
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind(&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Hook up every text decoder's start/stop signals; weak_ptrs avoid
		   keeping pieces alive from inside the signal bindings. */
		auto j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	/* Record, per audio stream, which piece it belongs to and where it starts */
	_stream_states.clear ();
	for (auto i: _pieces) {
		if (i->content->audio) {
			for (auto j: i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
		if (auto video = (*i)->content->video) {
			if (video->use() && video->frame_type() != VideoFrameType::THREE_D_LEFT && video->frame_type() != VideoFrameType::THREE_D_RIGHT) {
				/* Look for content later in the content list with in-use video that overlaps this */
				auto period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
				auto j = i;
				++j;
				for (; j != _pieces.end(); ++j) {
					if ((*j)->content->video && (*j)->content->video->use()) {
						/* NOTE(review): each later overlapping piece overwrites
						   ignore_video, so only the last candidate wins — presumably
						   at most one overlap is expected; verify if that assumption
						   ever breaks. */
						(*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
					}
				}
			}
		}
	}

	/* Gap trackers for periods with no video (black) or no audio (silence) */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	_last_video_time = boost::optional<dcpomatic::DCPTime>();
	_last_video_eyes = Eyes::BOTH;
	_last_audio_time = boost::optional<dcpomatic::DCPTime>();
}
292
293
/** Respond to a change in some piece of playlist content, then re-emit the
 *  change on our own Change signal.
 *
 *  For most properties a PENDING change increments _suspended (blocking pass()
 *  and incoming decoder data) until the matching DONE or CANCELLED arrives, at
 *  which point pieces are rebuilt (on DONE) and _suspended is decremented.
 *  CROP changes are handled more cheaply by just refreshing metadata on the
 *  delayed frames.
 *
 *  @param type Whether the change is pending, done or cancelled.
 *  @param property Property that changed.
 *  @param frequent true if this change is expected to happen often (passed through).
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (property == VideoContentProperty::CROP) {
		if (type == ChangeType::DONE) {
			auto const vcs = video_container_size();
			boost::mutex::scoped_lock lm (_mutex);
			for (auto const& i: _delay) {
				i.first->reset_metadata (_film, vcs);
			}
		}
	} else {
		if (type == ChangeType::PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces.  Stop pass()
			   and seek() from working until then.
			*/
			++_suspended;
		} else if (type == ChangeType::DONE) {
			/* A change in our content has gone through.  Re-build our pieces. */
			setup_pieces ();
			--_suspended;
		} else if (type == ChangeType::CANCELLED) {
			--_suspended;
		}
	}

	Change (type, property, frequent);
}
323
324
/** Set the size of the container into which video will be scaled, rebuilding
 *  the black frame to match.  Emits PENDING then DONE around the change, or
 *  PENDING then CANCELLED if the size is unchanged.  The lock is released
 *  before each signal emission so handlers can call back into the Player.
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No-op: unlock before signalling the cancellation */
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Re-make the cached black frame at the new size */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
347
348
349 void
350 Player::playlist_change (ChangeType type)
351 {
352         if (type == ChangeType::DONE) {
353                 setup_pieces ();
354         }
355         Change (type, PlayerProperty::PLAYLIST, false);
356 }
357
358
/** Respond to a change in a Film property that affects our output.
 *  @param type Stage of the change.
 *  @param p Property which changed.
 */
void
Player::film_change (ChangeType type, Film::Property p)
{
	/* Here we should notice Film properties that affect our output, and
	   alert listeners that our output now would be different to how it was
	   last time we were run.
	*/

	if (p == Film::Property::CONTAINER) {
		Change (type, PlayerProperty::FILM_CONTAINER, false);
	} else if (p == Film::Property::VIDEO_FRAME_RATE) {
		/* Pieces contain a FrameRateChange which contains the DCP frame rate,
		   so we need new pieces here.
		*/
		if (type == ChangeType::DONE) {
			setup_pieces ();
		}
		Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
	} else if (p == Film::Property::AUDIO_PROCESSOR) {
		if (type == ChangeType::DONE && _film->audio_processor ()) {
			/* Clone the processor so we have our own instance at the film's rate */
			boost::mutex::scoped_lock lm (_mutex);
			_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
		}
	} else if (p == Film::Property::AUDIO_CHANNELS) {
		if (type == ChangeType::DONE) {
			/* Pending merged audio has the wrong channel count; discard it */
			boost::mutex::scoped_lock lm (_mutex);
			_audio_merger.clear ();
		}
	}
}
389
390
/** Make a PlayerVideo of a completely black frame at the current container size.
 *  @param eyes Eye(s) that the frame is for (BOTH for 2D).
 *  @return New black PlayerVideo frame.
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		Crop(),
		optional<double>(),
		_video_container_size,
		_video_container_size,
		eyes,
		Part::WHOLE,
		PresetColourConversion::all().front().conversion,
		VideoRange::FULL,
		std::weak_ptr<Content>(),
		boost::optional<Frame>(),
		false
	);
}
409
410
/** Convert a DCP time to a frame index within a piece of content's video.
 *  @param piece Piece containing the content.
 *  @param t DCP time.
 *  @return Frame index in the content's own video, accounting for position,
 *  trim and any skip/repeat frame-rate change.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
427
428
/** Convert a frame index in a piece of content's video to a DCP time.
 *  Inverse of dcp_to_content_video.
 *  @param piece Piece containing the content.
 *  @param f Frame index in the content's video.
 *  @return Corresponding DCP time.
 */
DCPTime
Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
	/* See comment in dcp_to_content_video */
	auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
	return d + piece->content->position();
}
436
437
/** Convert a DCP time to a frame index in a piece of content's audio,
 *  after that audio has been resampled to the film's audio rate.
 *  @param piece Piece containing the content.
 *  @param t DCP time.
 *  @return Resampled audio frame index.
 */
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
}
446
447
448 DCPTime
449 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
450 {
451         /* See comment in dcp_to_content_video */
452         return DCPTime::from_frames (f, _film->audio_frame_rate())
453                 - DCPTime (piece->content->trim_start(), piece->frc)
454                 + piece->content->position();
455 }
456
457
/** Convert a DCP time to a time within a piece of content, accounting for
 *  the content's position, trim and frame-rate change.
 *  @param piece Piece containing the content.
 *  @param t DCP time.
 *  @return Corresponding content time, clamped to be non-negative.
 */
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
465
466
467 DCPTime
468 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
469 {
470         return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
471 }
472
473
474 vector<FontData>
475 Player::get_subtitle_fonts ()
476 {
477         boost::mutex::scoped_lock lm (_mutex);
478
479         vector<FontData> fonts;
480         for (auto i: _pieces) {
481                 /* XXX: things may go wrong if there are duplicate font IDs
482                    with different font files.
483                 */
484                 auto f = i->decoder->fonts ();
485                 copy (f.begin(), f.end(), back_inserter(fonts));
486         }
487
488         return fonts;
489 }
490
491
/** Set this player never to produce any video data.
 *  Rebuilds pieces so decoders pick up the new ignore flag.
 */
void
Player::set_ignore_video ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_video = true;
	setup_pieces_unlocked ();
}
500
501
/** Set this player never to produce any audio data.
 *  Rebuilds pieces so decoders pick up the new ignore flag.
 */
void
Player::set_ignore_audio ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_audio = true;
	setup_pieces_unlocked ();
}
509
510
/** Set this player never to produce any text (subtitle/caption) data.
 *  Rebuilds pieces so decoders pick up the new ignore flag.
 */
void
Player::set_ignore_text ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_text = true;
	setup_pieces_unlocked ();
}
518
519
/** Set the player to always burn open texts into the image regardless of the content settings.
 *  No piece rebuild is needed; the flag is consulted when frames are produced.
 */
void
Player::set_always_burn_open_subtitles ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_always_burn_open_subtitles = true;
}
527
528
/** Sets up the player to be faster, possibly at the expense of quality.
 *  Rebuilds pieces so the decoder factory sees the new flag.
 */
void
Player::set_fast ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_fast = true;
	setup_pieces_unlocked ();
}
537
538
/** Set the player to decode and play content from referenced DCPs, rather
 *  than skipping it.  Rebuilds pieces so DCP decoders pick up the flag.
 */
void
Player::set_play_referenced ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	setup_pieces_unlocked ();
}
546
547
/** Apply reel-level trims to an asset and, if anything remains, add it to a list.
 *  Note that this mutates the asset it is given: entry point and duration are
 *  adjusted in place before the length check.
 *  @param a List to add to.
 *  @param r Asset to consider (must be non-null).
 *  @param reel_trim_start Frames to trim from the start of this reel.
 *  @param reel_trim_end Frames to trim from the end of this reel.
 *  @param from DCP time at which the asset would start.
 *  @param ffr Film video frame rate, used to convert duration to DCP time.
 */
static void
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
{
	DCPOMATIC_ASSERT (r);
	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
	if (r->actual_duration() > 0) {
		a.push_back (
			ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
			);
	}
}
560
561
/** Collect the reel assets from DCP content which is marked as referenced
 *  (i.e. whose picture/sound/text should be taken directly from the source
 *  DCP rather than re-encoded), with the content's trims applied per reel.
 *  @return Referenced reel assets with the DCP time period each occupies.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	for (auto i: playlist()->content()) {
		auto j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder(_film, j, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* NOTE(review): a failure to open one DCP abandons the whole scan,
			   returning whatever was gathered so far. */
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		for (auto k: decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		for (auto k: decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::CLOSED_CAPTION)) {
				for (auto l: k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
632
633
/** Run one step of playback: find whichever source (content decoder, black gap
 *  or silent gap) is farthest behind, make it emit some data, then push out
 *  any audio that is known to be complete.  Does nothing while the player is
 *  suspended (see playlist_content_change).
 *  @return true if playback has finished, false otherwise.
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	for (auto i: _pieces) {
		if (i->done) {
			continue;
		}

		auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			/* This piece's decoder has gone past the end of its content */
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What we will do this pass: nothing, pass a decoder, emit black or emit silence */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* A black or silent gap that starts before the earliest content takes priority */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
		earliest_content->done = earliest_content->decoder->pass ();
		auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	auto pull_to = _playback_length;
	for (auto const& i: _stream_states) {
		if (!i.second.piece->done && i.second.last_push_end < pull_to) {
			pull_to = i.second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	auto audio = _audio_merger.pull (pull_to);
	for (auto i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			auto cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Flush anything queued in the 3D shuffler and the frame-delay buffer */
		_shuffler->flush ();
		for (auto const& i: _delay) {
			do_emit_video(i.first, i.second);
		}
	}

	return done;
}
792
793
/** @return Open subtitles for the frame at the given time, converted to images.
 *  Gathers both bitmap subtitles and string subtitles (the latter rendered to
 *  images) which are active during the one-frame period starting at @p time
 *  and should be burnt in, then merges them into a single positioned image.
 *  Returns an empty optional if no subtitles apply.
 */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	for (
		auto j:
		_active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		for (auto i: j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

			/* Rectangle coordinates are proportions of the container; convert to pixels */
			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint(_video_container_size.width * i.rectangle.x),
						lrint(_video_container_size.height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty()) {
			auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty()) {
		return {};
	}

	return merge (captions);
}
839
840
/** Handle a video frame arriving from a decoder.
 *  @param wp Piece that the frame came from.
 *  @param video The frame itself.
 *
 *  Discards frames that should not be used, fills any gap since the last
 *  emitted frame (with repeated video or black), then emits this frame
 *  (possibly several times, if the frame rate change requires repeats).
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	/* Ignore incoming data while we are suspended (e.g. mid-seek) */
	if (_suspended) {
		return;
	}

	auto piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	if (!piece->content->video->use()) {
		return;
	}

	FrameRateChange frc (_film, piece->content);
	/* When frc.skip is set alternate content frames are dropped to reach the
	   DCP rate; we drop the odd-numbered ones.
	*/
	if (frc.skip && (video.frame % 2) == 1) {
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Frames inside an explicitly-ignored period (e.g. trimmed-away parts) are dropped */
	if (piece->ignore_video && piece->ignore_video->contains(time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* Last frame we emitted for this piece, if any; we repeat it to fill,
			   or fall back to black.
			*/
			auto last = _last_video.find (wp);
			if (_film->three_d()) {
				/* In 3D we must fill whole left/right pairs, tracking which eye
				   comes next so the stream stays alternating.
				*/
				auto fill_to_eyes = video.eyes;
				if (fill_to_eyes == Eyes::BOTH) {
					fill_to_eyes = Eyes::LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = Eyes::LEFT;
				}
				auto j = fill_from;
				auto eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
				if (eyes == Eyes::BOTH) {
					eyes = Eyes::LEFT;
				}
				/* Loop until we reach both the target time and the target eye */
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						auto copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					/* Time only advances after the right eye of each pair */
					if (eyes == Eyes::RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: simply repeat the last frame (or black) up to fill_to */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(Eyes::BOTH), j);
					}
				}
			}
		}
	}

	/* Remember this frame (per piece) so that future gaps can be filled with it */
	_last_video[wp] = std::make_shared<PlayerVideo>(
		video.image,
		piece->content->video->crop (),
		piece->content->video->fade (_film, video.frame),
		scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
		_video_container_size,
		video.eyes,
		video.part,
		piece->content->video->colour_conversion(),
		piece->content->video->range(),
		piece->content,
		video.frame,
		false
		);

	/* Emit the frame, repeated as many times as the frame rate change requires,
	   but never past the end of the content.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
953
954
/** Handle a block of audio arriving from a decoder.
 *  @param wp Piece that the audio came from.
 *  @param stream Stream within the piece that the audio belongs to.
 *  @param content_audio The audio data and its frame position within the content.
 *
 *  Trims the block to the content's period, applies gain, remaps channels,
 *  runs any audio processor and pushes the result into the merger.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	/* Ignore incoming data while we are suspended (e.g. mid-seek) */
	if (_suspended) {
		return;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	auto piece = wp.lock ();
	if (!piece) {
		return;
	}

	auto content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Frame rate that this content's audio will have after resampling for the film */
	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	auto time = resampled_audio_to_dcp (piece, content_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		auto cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		/* Keep the remainder, which starts at the content's position */
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* The block straddles the end of the content; keep only the part before the end */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy first so we don't modify the decoder's buffers in place */
		auto gain = make_shared<AudioBuffers>(content_audio.audio);
		gain->apply_gain (content->gain());
		content_audio.audio = gain;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	/* Record how far this stream has got, so pass() knows what can safely be pulled */
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
1027
1028
/** Handle the start of a bitmap subtitle arriving from a decoder.
 *  @param wp Piece that the subtitle came from.
 *  @param wc Text content that the subtitle belongs to.
 *  @param subtitle The bitmap subtitle and its position rectangle (proportions of the video).
 *
 *  Applies the content's offset/scale settings, scales the image to fit the
 *  video container and adds it to the active-texts list.
 */
void
Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
{
	/* Ignore incoming data while we are suspended (e.g. mid-seek) */
	if (_suspended) {
		return;
	}

	auto piece = wp.lock ();
	auto text = wc.lock ();
	if (!piece || !text) {
		return;
	}

	/* Apply content's subtitle offsets */
	subtitle.sub.rectangle.x += text->x_offset ();
	subtitle.sub.rectangle.y += text->y_offset ();

	/* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
	subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
	subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);

	/* Apply content's subtitle scale */
	subtitle.sub.rectangle.width *= text->x_scale ();
	subtitle.sub.rectangle.height *= text->y_scale ();

	PlayerText ps;
	auto image = subtitle.sub.image;

	/* We will scale the subtitle up to fit _video_container_size */
	int const width = subtitle.sub.rectangle.width * _video_container_size.width;
	int const height = subtitle.sub.rectangle.height * _video_container_size.height;
	if (width == 0 || height == 0) {
		/* Degenerate rectangle; nothing to show */
		return;
	}

	dcp::Size scaled_size (width, height);
	ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
	DCPTime from (content_time_to_dcp (piece, subtitle.from()));

	_active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
}
1070
1071
/** Handle the start of a string (text) subtitle arriving from a decoder.
 *  @param wp Piece that the subtitle came from.
 *  @param wc Text content that the subtitle belongs to.
 *  @param subtitle The subtitle strings and their timing.
 *
 *  Applies the content's offset/scale settings to each string and adds the
 *  result to the active-texts list.
 */
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	/* Ignore incoming data while we are suspended (e.g. mid-seek) */
	if (_suspended) {
		return;
	}

	auto piece = wp.lock ();
	auto text = wc.lock ();
	if (!piece || !text) {
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	if (from > piece->content->end(_film)) {
		/* Starts after the content has finished; drop it */
		return;
	}

	for (auto s: subtitle.subs) {
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust the size to express the part of the scaling which is common to
		   X and Y: 1 / min(1/xs, 1/ys) is equivalent to max(xs, ys), so e.g.
		   xs = ys = 0.5 halves the size.  NOTE(review): an earlier comment here
		   claimed the opposite ("we scale size by 2") — confirm which behaviour
		   is intended.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* Timecode in milliseconds, relative to the DCP start */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
}
1119
1120
1121 void
1122 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1123 {
1124         if (_suspended) {
1125                 return;
1126         }
1127
1128         auto text = wc.lock ();
1129         if (!text) {
1130                 return;
1131         }
1132
1133         if (!_active_texts[static_cast<int>(text->type())].have(wc)) {
1134                 return;
1135         }
1136
1137         shared_ptr<Piece> piece = wp.lock ();
1138         if (!piece) {
1139                 return;
1140         }
1141
1142         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1143
1144         if (dcp_to > piece->content->end(_film)) {
1145                 return;
1146         }
1147
1148         auto from = _active_texts[static_cast<int>(text->type())].add_to (wc, dcp_to);
1149
1150         bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1151         if (text->use() && !always && !text->burn()) {
1152                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1153         }
1154 }
1155
1156
/** Seek the player to a new position.
 *  @param time Position to seek to, in the DCP.
 *  @param accurate true to seek exactly to @p time; false to allow decoders to
 *  start at a convenient nearby point (e.g. a keyframe).
 *
 *  Flushes all buffered state, seeks each piece's decoder and resets the
 *  last-emitted-time bookkeeping.
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	/* Throw away anything buffered from before the seek */
	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
		_active_texts[i].clear ();
	}

	for (auto i: _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* We know exactly where the next output will come from */
		_last_video_time = time;
		_last_video_eyes = Eyes::LEFT;
		_last_audio_time = time;
	} else {
		/* We don't know where the decoders will restart, so forget our positions */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Old frames must not be used for gap-filling after a seek */
	_last_video.clear ();
}
1217
1218
/** Queue a video frame for output, emitting an older one if the queue is full.
 *  @param pv Frame to emit.
 *  @param time Time of the frame within the DCP.
 *
 *  Frames are buffered briefly in _delay so that subtitles which arrive a
 *  little late can still be attached before the frame goes out.
 */
void
Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
{
	if (!_film->three_d()) {
		if (pv->eyes() == Eyes::LEFT) {
			/* Use left-eye images for both eyes... */
			pv->set_eyes (Eyes::BOTH);
		} else if (pv->eyes() == Eyes::RIGHT) {
			/* ...and discard the right */
			return;
		}
	}

	/* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
	   player before the video that requires them.
	*/
	_delay.push_back (make_pair (pv, time));

	/* Time advances once the frame (or the right eye of a 3D pair) has been queued */
	if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
		_last_video_time = time + one_video_frame();
	}
	_last_video_eyes = increment_eyes (pv->eyes());

	/* Keep up to two frames in the delay queue; only emit when a third arrives */
	if (_delay.size() < 3) {
		return;
	}

	auto to_do = _delay.front();
	_delay.pop_front();
	do_emit_video (to_do.first, to_do.second);
}
1250
1251
1252 void
1253 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1254 {
1255         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1256                 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1257                         _active_texts[i].clear_before (time);
1258                 }
1259         }
1260
1261         auto subtitles = open_subtitles_for_frame (time);
1262         if (subtitles) {
1263                 pv->set_text (subtitles.get ());
1264         }
1265
1266         Video (pv, time);
1267 }
1268
1269
1270 void
1271 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1272 {
1273         /* Log if the assert below is about to fail */
1274         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1275                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1276         }
1277
1278         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1279         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1280         Audio (data, time, _film->audio_frame_rate());
1281         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1282 }
1283
1284
1285 void
1286 Player::fill_audio (DCPTimePeriod period)
1287 {
1288         if (period.from == period.to) {
1289                 return;
1290         }
1291
1292         DCPOMATIC_ASSERT (period.from < period.to);
1293
1294         DCPTime t = period.from;
1295         while (t < period.to) {
1296                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1297                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1298                 if (samples) {
1299                         auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1300                         silence->make_silent ();
1301                         emit_audio (silence, t);
1302                 }
1303                 t += block;
1304         }
1305 }
1306
1307
1308 DCPTime
1309 Player::one_video_frame () const
1310 {
1311         return DCPTime::from_frames (1, _film->video_frame_rate ());
1312 }
1313
1314
1315 pair<shared_ptr<AudioBuffers>, DCPTime>
1316 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1317 {
1318         auto const discard_time = discard_to - time;
1319         auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1320         auto remaining_frames = audio->frames() - discard_frames;
1321         if (remaining_frames <= 0) {
1322                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1323         }
1324         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1325         return make_pair(cut, time + discard_time);
1326 }
1327
1328
/** Set the decode-resolution reduction to use for DCP content.
 *  @param reduction Number of times to halve the decode resolution, or none
 *  for full resolution.
 *
 *  Emits PENDING before the change and DONE (or CANCELLED, if the value is
 *  unchanged) afterwards.
 */
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change; unlock before signalling so that handlers which call
			   back into the Player can take the mutex.
			*/
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		/* Rebuild the pieces with the new reduction (we already hold the lock) */
		setup_pieces_unlocked ();
	}

	/* Signal after releasing the lock, as above */
	Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1349
1350
1351 optional<DCPTime>
1352 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1353 {
1354         boost::mutex::scoped_lock lm (_mutex);
1355
1356         for (auto i: _pieces) {
1357                 if (i->content == content) {
1358                         return content_time_to_dcp (i, t);
1359                 }
1360         }
1361
1362         /* We couldn't find this content; perhaps things are being changed over */
1363         return {};
1364 }
1365
1366
1367 shared_ptr<const Playlist>
1368 Player::playlist () const
1369 {
1370         return _playlist ? _playlist : _film->playlist();
1371 }
1372
1373
1374 void
1375 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1376 {
1377         if (_suspended) {
1378                 return;
1379         }
1380
1381         Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
1382 }
1383