Replace aligned bool with enum Alignment.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "player.h"
24 #include "film.h"
25 #include "audio_buffers.h"
26 #include "content_audio.h"
27 #include "dcp_content.h"
28 #include "dcpomatic_log.h"
29 #include "job.h"
30 #include "image.h"
31 #include "raw_image_proxy.h"
32 #include "ratio.h"
33 #include "log.h"
34 #include "render_text.h"
35 #include "config.h"
36 #include "content_video.h"
37 #include "player_video.h"
38 #include "frame_rate_change.h"
39 #include "audio_processor.h"
40 #include "playlist.h"
41 #include "referenced_reel_asset.h"
42 #include "decoder_factory.h"
43 #include "decoder.h"
44 #include "video_decoder.h"
45 #include "audio_decoder.h"
46 #include "text_content.h"
47 #include "text_decoder.h"
48 #include "ffmpeg_content.h"
49 #include "audio_content.h"
50 #include "dcp_decoder.h"
51 #include "image_decoder.h"
52 #include "compose.hpp"
53 #include "shuffler.h"
54 #include "timer.h"
55 #include <dcp/reel.h>
56 #include <dcp/reel_sound_asset.h>
57 #include <dcp/reel_subtitle_asset.h>
58 #include <dcp/reel_picture_asset.h>
59 #include <dcp/reel_closed_caption_asset.h>
60 #include <stdint.h>
61 #include <algorithm>
62 #include <iostream>
63
64 #include "i18n.h"
65
66
67 using std::copy;
68 using std::cout;
69 using std::dynamic_pointer_cast;
70 using std::list;
71 using std::make_pair;
72 using std::make_shared;
73 using std::map;
74 using std::max;
75 using std::min;
76 using std::min;
77 using std::pair;
78 using std::shared_ptr;
79 using std::vector;
80 using std::weak_ptr;
81 using std::make_shared;
82 using boost::optional;
83 using boost::scoped_ptr;
84 #if BOOST_VERSION >= 106100
85 using namespace boost::placeholders;
86 #endif
87 using namespace dcpomatic;
88
89
/* Property codes reported via the Player's Change signal.  Numbered from
   700 — presumably to avoid clashing with codes used by other property
   ranges (e.g. content properties); confirm against those headers. */
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
96
97
/** Construct a Player for the whole of a film's own playlist.
 *  @param film Film to play.
 *  @param subtitle_alignment Alignment to use when merging rendered subtitle images.
 */
Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
	: _film (film)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
	, _subtitle_alignment (subtitle_alignment)
{
	construct ();
}
107
108
/** Construct a Player for an explicit playlist rather than the film's own.
 *  @param film Film which the playlist content belongs to.
 *  @param playlist_ Playlist to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
118
119
/** Shared part of both constructors: connect to film/playlist change signals,
 *  set up the initial container size and pieces, then seek to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Make sure any audio processor the film needs is set up before the first pass() */
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}
136
137
138 void
139 Player::setup_pieces ()
140 {
141         boost::mutex::scoped_lock lm (_mutex);
142         setup_pieces_unlocked ();
143 }
144
145
146 bool
147 have_video (shared_ptr<const Content> content)
148 {
149         return static_cast<bool>(content->video) && content->video->use();
150 }
151
152
153 bool
154 have_audio (shared_ptr<const Content> content)
155 {
156         return static_cast<bool>(content->audio);
157 }
158
159
/** Rebuild _pieces from the playlist content, creating (or re-using) decoders
 *  and wiring their output signals through to this Player.
 *  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so their decoders can be re-used for unchanged content */
	auto old_pieces = _pieces;
	_pieces.clear ();

	_shuffler.reset (new Shuffler());
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	for (auto i: playlist()->content()) {

		if (!i->paths_valid ()) {
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Look for a decoder we made for this content last time round */
		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}

		auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			for (auto i: decoder->text) {
				i->set_ignore (true);
			}
		}

		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(i, decoder, frc);
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Forward subtitle start/stop events from every text decoder */
		auto j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	/* Record where each audio stream starts */
	_stream_states.clear ();
	for (auto i: _pieces) {
		if (i->content->audio) {
			for (auto j: i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Mark video as ignored where later content in the list overlaps it
	   (later content wins); 3D L/R content is left alone. */
	for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
		if (auto video = (*i)->content->video) {
			if (video->use() && video->frame_type() != VideoFrameType::THREE_D_LEFT && video->frame_type() != VideoFrameType::THREE_D_RIGHT) {
				/* Look for content later in the content list with in-use video that overlaps this */
				auto period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
				auto j = i;
				++j;
				for (; j != _pieces.end(); ++j) {
					if ((*j)->content->video && (*j)->content->video->use()) {
						(*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
					}
				}
			}
		}
	}

	/* Trackers for the periods of the timeline which have no video / no audio */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	/* Reset emission state */
	_last_video_time = boost::optional<dcpomatic::DCPTime>();
	_last_video_eyes = Eyes::BOTH;
	_last_audio_time = boost::optional<dcpomatic::DCPTime>();
}
286
287
/** React to a change in one of our pieces of content.
 *  @param type PENDING, DONE or CANCELLED.
 *  @param property Code of the property which is changing.
 *  @param frequent true if this change is likely to happen often.
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (property == VideoContentProperty::CROP) {
		if (type == ChangeType::DONE) {
			auto const vcs = video_container_size();
			boost::mutex::scoped_lock lm (_mutex);
			/* Update the metadata of any video frames we are holding back in the delay queue */
			for (auto const& i: _delay) {
				i.first->reset_metadata (_film, vcs);
			}
		}
	} else {
		if (type == ChangeType::PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces.  Stop pass()
			   and seek() from working until then.
			*/
			++_suspended;
		} else if (type == ChangeType::DONE) {
			/* A change in our content has gone through.  Re-build our pieces. */
			setup_pieces ();
			--_suspended;
		} else if (type == ChangeType::CANCELLED) {
			--_suspended;
		}
	}

	/* Proxy the change on to our own listeners (the butler, in particular) */
	Change (type, property, frequent);
}
317
318
/** Set the size of the container that output video will be scaled into.
 *  Emits PENDING then DONE, or CANCELLED if the size is unchanged.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No change: unlock before emitting, as handlers may call back into us */
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Re-make the black frame we emit where there is no video */
		_black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
		_black_image->make_black ();
	}

	Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
341
342
343 void
344 Player::playlist_change (ChangeType type)
345 {
346         if (type == ChangeType::DONE) {
347                 setup_pieces ();
348         }
349         Change (type, PlayerProperty::PLAYLIST, false);
350 }
351
352
353 void
354 Player::film_change (ChangeType type, Film::Property p)
355 {
356         /* Here we should notice Film properties that affect our output, and
357            alert listeners that our output now would be different to how it was
358            last time we were run.
359         */
360
361         if (p == Film::Property::CONTAINER) {
362                 Change (type, PlayerProperty::FILM_CONTAINER, false);
363         } else if (p == Film::Property::VIDEO_FRAME_RATE) {
364                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
365                    so we need new pieces here.
366                 */
367                 if (type == ChangeType::DONE) {
368                         setup_pieces ();
369                 }
370                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
371         } else if (p == Film::Property::AUDIO_PROCESSOR) {
372                 if (type == ChangeType::DONE && _film->audio_processor ()) {
373                         boost::mutex::scoped_lock lm (_mutex);
374                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
375                 }
376         } else if (p == Film::Property::AUDIO_CHANNELS) {
377                 if (type == ChangeType::DONE) {
378                         boost::mutex::scoped_lock lm (_mutex);
379                         _audio_merger.clear ();
380                 }
381         }
382 }
383
384
/** @return a black frame, sized to the video container, for the given eyes */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		Crop(),
		optional<double>(),        /* no fade */
		_video_container_size,     /* inter size */
		_video_container_size,     /* out size */
		eyes,
		Part::WHOLE,
		PresetColourConversion::all().front().conversion,
		VideoRange::FULL,
		std::weak_ptr<Content>(),  /* no originating content */
		boost::optional<Frame>(),  /* no video frame */
		false                      /* not an error frame */
	);
}
403
404
/** @return the video frame of a piece of content which should be shown at a
 *  given DCP time, clamped to the content's trimmed extent.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
421
422
423 DCPTime
424 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
425 {
426         /* See comment in dcp_to_content_video */
427         auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
428         return d + piece->content->position();
429 }
430
431
432 Frame
433 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
434 {
435         auto s = t - piece->content->position ();
436         s = min (piece->content->length_after_trim(_film), s);
437         /* See notes in dcp_to_content_video */
438         return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
439 }
440
441
442 DCPTime
443 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
444 {
445         /* See comment in dcp_to_content_video */
446         return DCPTime::from_frames (f, _film->audio_frame_rate())
447                 - DCPTime (piece->content->trim_start(), piece->frc)
448                 + piece->content->position();
449 }
450
451
452 ContentTime
453 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
454 {
455         auto s = t - piece->content->position ();
456         s = min (piece->content->length_after_trim(_film), s);
457         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
458 }
459
460
461 DCPTime
462 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
463 {
464         return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
465 }
466
467
468 vector<FontData>
469 Player::get_subtitle_fonts ()
470 {
471         boost::mutex::scoped_lock lm (_mutex);
472
473         vector<FontData> fonts;
474         for (auto i: _pieces) {
475                 /* XXX: things may go wrong if there are duplicate font IDs
476                    with different font files.
477                 */
478                 auto f = i->decoder->fonts ();
479                 copy (f.begin(), f.end(), back_inserter(fonts));
480         }
481
482         return fonts;
483 }
484
485
486 /** Set this player never to produce any video data */
487 void
488 Player::set_ignore_video ()
489 {
490         boost::mutex::scoped_lock lm (_mutex);
491         _ignore_video = true;
492         setup_pieces_unlocked ();
493 }
494
495
496 void
497 Player::set_ignore_audio ()
498 {
499         boost::mutex::scoped_lock lm (_mutex);
500         _ignore_audio = true;
501         setup_pieces_unlocked ();
502 }
503
504
505 void
506 Player::set_ignore_text ()
507 {
508         boost::mutex::scoped_lock lm (_mutex);
509         _ignore_text = true;
510         setup_pieces_unlocked ();
511 }
512
513
514 /** Set the player to always burn open texts into the image regardless of the content settings */
515 void
516 Player::set_always_burn_open_subtitles ()
517 {
518         boost::mutex::scoped_lock lm (_mutex);
519         _always_burn_open_subtitles = true;
520 }
521
522
523 /** Sets up the player to be faster, possibly at the expense of quality */
524 void
525 Player::set_fast ()
526 {
527         boost::mutex::scoped_lock lm (_mutex);
528         _fast = true;
529         setup_pieces_unlocked ();
530 }
531
532
533 void
534 Player::set_play_referenced ()
535 {
536         boost::mutex::scoped_lock lm (_mutex);
537         _play_referenced = true;
538         setup_pieces_unlocked ();
539 }
540
541
542 static void
543 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
544 {
545         DCPOMATIC_ASSERT (r);
546         r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
547         r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
548         if (r->actual_duration() > 0) {
549                 a.push_back (
550                         ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
551                         );
552         }
553 }
554
555
/** @return reel assets from any DCP content whose video/audio/text is marked
 *  to be referenced from (rather than re-encoded into) the DCP we are making,
 *  with trims applied.  See doc/design/trim_reels.svg.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	for (auto i: playlist()->content()) {
		auto j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder(_film, j, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* If the DCP could not be examined, give up and return what we have so far */
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		/* Trims expressed in content frames */
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		for (auto k: decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		for (auto k: decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			/* DCP time at which this reel starts */
			auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::CLOSED_CAPTION)) {
				for (auto l: k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
626
627
/** Ask the furthest-behind decoder (or black/silence filler) to emit some data.
 *  @return true if we have reached the end of the playback length and there is
 *  nothing more to come; false otherwise (including when suspended).
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	for (auto i: _pieces) {
		if (i->done) {
			continue;
		}

		/* Where this piece's decoder currently is, expressed as a DCP time */
		auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What we will ask to produce data this pass */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Gaps with no video (black) or no audio (silence) may be further behind
	   than any content decoder */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
		earliest_content->done = earliest_content->decoder->pass ();
		auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence at a time */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	auto pull_to = _playback_length;
	for (auto const& i: _stream_states) {
		if (!i.second.piece->done && i.second.last_push_end < pull_to) {
			pull_to = i.second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	auto audio = _audio_merger.pull (pull_to);
	for (auto i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			auto cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Flush out anything that is being held back for 3D re-ordering or delay */
		_shuffler->flush ();
		for (auto const& i: _delay) {
			do_emit_video(i.first, i.second);
		}
	}

	return done;
}
786
787
/** @return Open subtitles for the frame at the given time, converted to images
 *  and merged into a single positioned image, or an empty optional if there
 *  are none to burn in.
 */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	/* Consider all open subtitles which should be burnt in during this frame's period */
	for (
		auto j:
		_active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		for (auto i: j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

			/* Rectangle coordinates are proportional to the container size */
			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint(_video_container_size.width * i.rectangle.x),
						lrint(_video_container_size.height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty()) {
			auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty()) {
		return {};
	}

	return merge (captions, _subtitle_alignment);
}
833
834
/** Handle a video frame arriving from a decoder.  Discard it if it should not
 *  be shown; otherwise fill any gap between the last frame we emitted and this
 *  one (with a repeat of the last frame, or black), then emit it, repeating as
 *  the frame-rate change requires.
 *  @param wp Piece that the video is from.
 *  @param video The video frame and its frame index within the content.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	/* Drop decoder output while we are suspended */
	if (_suspended) {
		return;
	}

	auto piece = wp.lock ();
	if (!piece) {
		/* The piece has gone since this frame was decoded */
		return;
	}

	if (!piece->content->video->use()) {
		return;
	}

	/* With frc.skip set, odd-numbered content frames are dropped */
	FrameRateChange frc (_film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Frames inside an explicitly-ignored period are dropped */
	if (piece->ignore_video && piece->ignore_video->contains(time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* Previously-emitted frame for this piece, if any; re-used for filling */
			auto last = _last_video.find (wp);
			if (_film->three_d()) {
				auto fill_to_eyes = video.eyes;
				if (fill_to_eyes == Eyes::BOTH) {
					fill_to_eyes = Eyes::LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = Eyes::LEFT;
				}
				auto j = fill_from;
				auto eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
				if (eyes == Eyes::BOTH) {
					eyes = Eyes::LEFT;
				}
				/* Step frame-by-frame and eye-by-eye up to fill_to / fill_to_eyes */
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						auto copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					/* Only advance time once both eyes of a frame have been emitted */
					if (eyes == Eyes::RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: repeat the last frame (or black) once per missing frame */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(Eyes::BOTH), j);
					}
				}
			}
		}
	}

	/* Remember this frame so that it can be repeated to fill future gaps */
	_last_video[wp] = std::make_shared<PlayerVideo>(
		video.image,
		piece->content->video->crop (),
		piece->content->video->fade (_film, video.frame),
		scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
		_video_container_size,
		video.eyes,
		video.part,
		piece->content->video->colour_conversion(),
		piece->content->video->range(),
		piece->content,
		video.frame,
		false
		);

	/* Emit the frame, repeated as frc.repeat requires, but never past the end of the content */
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
947
948
/** Handle some audio arriving from a decoder: trim it to the content's period,
 *  apply gain, remap it into the DCP's channels, run any audio processor and
 *  then push it to the merger.
 *  @param wp Piece that the audio is from.
 *  @param stream Stream within the piece that the audio belongs to.
 *  @param content_audio Audio data and its frame index within the resampled content.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	/* Drop decoder output while we are suspended */
	if (_suspended) {
		return;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	auto piece = wp.lock ();
	if (!piece) {
		/* The piece has gone since this audio was decoded */
		return;
	}

	auto content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	auto time = resampled_audio_to_dcp (piece, content_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		auto cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* Overlaps the end of the content; keep only the part before it */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy the buffers so that we don't modify the decoder's data */
		auto gain = make_shared<AudioBuffers>(content_audio.audio);
		gain->apply_gain (content->gain());
		content_audio.audio = gain;
	}

	/* Remap into the DCP's channel layout */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push to the merger, and note how far this stream has got */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
1021
1022
1023 void
1024 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
1025 {
1026         if (_suspended) {
1027                 return;
1028         }
1029
1030         auto piece = wp.lock ();
1031         auto text = wc.lock ();
1032         if (!piece || !text) {
1033                 return;
1034         }
1035
1036         /* Apply content's subtitle offsets */
1037         subtitle.sub.rectangle.x += text->x_offset ();
1038         subtitle.sub.rectangle.y += text->y_offset ();
1039
1040         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
1041         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
1042         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
1043
1044         /* Apply content's subtitle scale */
1045         subtitle.sub.rectangle.width *= text->x_scale ();
1046         subtitle.sub.rectangle.height *= text->y_scale ();
1047
1048         PlayerText ps;
1049         auto image = subtitle.sub.image;
1050
1051         /* We will scale the subtitle up to fit _video_container_size */
1052         int const width = subtitle.sub.rectangle.width * _video_container_size.width;
1053         int const height = subtitle.sub.rectangle.height * _video_container_size.height;
1054         if (width == 0 || height == 0) {
1055                 return;
1056         }
1057
1058         dcp::Size scaled_size (width, height);
1059         ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), subtitle.sub.rectangle));
1060         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
1061
1062         _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
1063 }
1064
1065
/** Handle the start of a string (text) subtitle from a decoder: apply the
 *  content's offsets, scaling and timing and file it with the active texts.
 *  @param wp Piece the subtitle is from.
 *  @param wc Text content the subtitle belongs to.
 *  @param subtitle The string subtitle.
 */
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	/* Drop decoder output while we are suspended */
	if (_suspended) {
		return;
	}

	auto piece = wp.lock ();
	auto text = wc.lock ();
	if (!piece || !text) {
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	/* Ignore subtitles which start after the content has finished */
	if (from > piece->content->end(_film)) {
		return;
	}

	for (auto s: subtitle.subs) {
		/* Apply content's subtitle offsets */
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling; the factor
		   is max(xs, ys), so e.g. if xs = ys = 0.5 the size is halved.  Any
		   remaining difference between xs and ys is expressed through
		   set_aspect_adjust() below.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* Convert the start time to editable-list units (1000ths of a second) */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
}
1113
1114
1115 void
1116 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1117 {
1118         if (_suspended) {
1119                 return;
1120         }
1121
1122         auto text = wc.lock ();
1123         if (!text) {
1124                 return;
1125         }
1126
1127         if (!_active_texts[static_cast<int>(text->type())].have(wc)) {
1128                 return;
1129         }
1130
1131         shared_ptr<Piece> piece = wp.lock ();
1132         if (!piece) {
1133                 return;
1134         }
1135
1136         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1137
1138         if (dcp_to > piece->content->end(_film)) {
1139                 return;
1140         }
1141
1142         auto from = _active_texts[static_cast<int>(text->type())].add_to (wc, dcp_to);
1143
1144         bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1145         if (text->use() && !always && !text->burn()) {
1146                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1147         }
1148 }
1149
1150
/** Seek the player to a time in the DCP.
 *  @param time Time to seek to.
 *  @param accurate true to seek to exactly `time'; false to seek approximately
 *  (which may be faster), in which case the next emitted data may be earlier
 *  than requested.
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	/* Drop any partially-collected 3D frames */
	if (_shuffler) {
		_shuffler->clear ();
	}

	/* Drop video frames waiting in the subtitle wiggle-room queue */
	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	/* Discard pending merged audio and all active text */
	_audio_merger.clear ();
	for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
		_active_texts[i].clear ();
	}

	/* Seek each piece's decoder as appropriate for where `time' falls relative to its content */
	for (auto i: _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	/* After an accurate seek we know exactly where the next video and audio will come from;
	   after an inaccurate one we must wait to see what the decoders give us.
	*/
	if (accurate) {
		_last_video_time = time;
		_last_video_eyes = Eyes::LEFT;
		_last_audio_time = time;
	} else {
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	/* Restart black/silence filling from the seek point */
	_black.set_position (time);
	_silent.set_position (time);

	/* Forget remembered frames; they are no longer adjacent to what we will emit */
	_last_video.clear ();
}
1211
1212
1213 void
1214 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1215 {
1216         if (!_film->three_d()) {
1217                 if (pv->eyes() == Eyes::LEFT) {
1218                         /* Use left-eye images for both eyes... */
1219                         pv->set_eyes (Eyes::BOTH);
1220                 } else if (pv->eyes() == Eyes::RIGHT) {
1221                         /* ...and discard the right */
1222                         return;
1223                 }
1224         }
1225
1226         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1227            player before the video that requires them.
1228         */
1229         _delay.push_back (make_pair (pv, time));
1230
1231         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1232                 _last_video_time = time + one_video_frame();
1233         }
1234         _last_video_eyes = increment_eyes (pv->eyes());
1235
1236         if (_delay.size() < 3) {
1237                 return;
1238         }
1239
1240         auto to_do = _delay.front();
1241         _delay.pop_front();
1242         do_emit_video (to_do.first, to_do.second);
1243 }
1244
1245
1246 void
1247 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1248 {
1249         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1250                 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1251                         _active_texts[i].clear_before (time);
1252                 }
1253         }
1254
1255         auto subtitles = open_subtitles_for_frame (time);
1256         if (subtitles) {
1257                 pv->set_text (subtitles.get ());
1258         }
1259
1260         Video (pv, time);
1261 }
1262
1263
1264 void
1265 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1266 {
1267         /* Log if the assert below is about to fail */
1268         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1269                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1270         }
1271
1272         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1273         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1274         Audio (data, time, _film->audio_frame_rate());
1275         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1276 }
1277
1278
1279 void
1280 Player::fill_audio (DCPTimePeriod period)
1281 {
1282         if (period.from == period.to) {
1283                 return;
1284         }
1285
1286         DCPOMATIC_ASSERT (period.from < period.to);
1287
1288         DCPTime t = period.from;
1289         while (t < period.to) {
1290                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1291                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1292                 if (samples) {
1293                         auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1294                         silence->make_silent ();
1295                         emit_audio (silence, t);
1296                 }
1297                 t += block;
1298         }
1299 }
1300
1301
1302 DCPTime
1303 Player::one_video_frame () const
1304 {
1305         return DCPTime::from_frames (1, _film->video_frame_rate ());
1306 }
1307
1308
1309 pair<shared_ptr<AudioBuffers>, DCPTime>
1310 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1311 {
1312         auto const discard_time = discard_to - time;
1313         auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1314         auto remaining_frames = audio->frames() - discard_frames;
1315         if (remaining_frames <= 0) {
1316                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1317         }
1318         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1319         return make_pair(cut, time + discard_time);
1320 }
1321
1322
1323 void
1324 Player::set_dcp_decode_reduction (optional<int> reduction)
1325 {
1326         Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1327
1328         {
1329                 boost::mutex::scoped_lock lm (_mutex);
1330
1331                 if (reduction == _dcp_decode_reduction) {
1332                         lm.unlock ();
1333                         Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1334                         return;
1335                 }
1336
1337                 _dcp_decode_reduction = reduction;
1338                 setup_pieces_unlocked ();
1339         }
1340
1341         Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1342 }
1343
1344
1345 optional<DCPTime>
1346 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1347 {
1348         boost::mutex::scoped_lock lm (_mutex);
1349
1350         for (auto i: _pieces) {
1351                 if (i->content == content) {
1352                         return content_time_to_dcp (i, t);
1353                 }
1354         }
1355
1356         /* We couldn't find this content; perhaps things are being changed over */
1357         return {};
1358 }
1359
1360
1361 shared_ptr<const Playlist>
1362 Player::playlist () const
1363 {
1364         return _playlist ? _playlist : _film->playlist();
1365 }
1366
1367
1368 void
1369 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1370 {
1371         if (_suspended) {
1372                 return;
1373         }
1374
1375         Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
1376 }
1377