Missed update to private test repo version.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "audio_buffers.h"
24 #include "audio_content.h"
25 #include "audio_decoder.h"
26 #include "audio_processor.h"
27 #include "compose.hpp"
28 #include "config.h"
29 #include "content_audio.h"
30 #include "content_video.h"
31 #include "dcp_content.h"
32 #include "dcp_decoder.h"
33 #include "dcpomatic_log.h"
34 #include "decoder.h"
35 #include "decoder_factory.h"
36 #include "ffmpeg_content.h"
37 #include "film.h"
38 #include "frame_rate_change.h"
39 #include "image.h"
40 #include "image_decoder.h"
41 #include "job.h"
42 #include "log.h"
43 #include "maths_util.h"
44 #include "piece.h"
45 #include "player.h"
46 #include "player_video.h"
47 #include "playlist.h"
48 #include "ratio.h"
49 #include "raw_image_proxy.h"
50 #include "render_text.h"
51 #include "shuffler.h"
52 #include "text_content.h"
53 #include "text_decoder.h"
54 #include "timer.h"
55 #include "video_decoder.h"
56 #include <dcp/reel.h>
57 #include <dcp/reel_closed_caption_asset.h>
58 #include <dcp/reel_picture_asset.h>
59 #include <dcp/reel_sound_asset.h>
60 #include <dcp/reel_subtitle_asset.h>
61 #include <algorithm>
62 #include <iostream>
63 #include <stdint.h>
64
65 #include "i18n.h"
66
67
68 using std::copy;
69 using std::cout;
70 using std::dynamic_pointer_cast;
71 using std::list;
72 using std::make_pair;
73 using std::make_shared;
74 using std::make_shared;
75 using std::max;
76 using std::min;
77 using std::min;
78 using std::pair;
79 using std::shared_ptr;
80 using std::vector;
81 using std::weak_ptr;
82 using boost::optional;
83 using boost::scoped_ptr;
84 #if BOOST_VERSION >= 106100
85 using namespace boost::placeholders;
86 #endif
87 using namespace dcpomatic;
88
89
/* Property identifiers emitted by the Player through its Change signal.
   NOTE(review): the 700-range numbering presumably keeps these distinct from
   Film/content property codes passed through the same signal — confirm.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
96
97
/** Construct a Player which plays the film's own playlist.
 *  @param film Film to play.
 *  @param subtitle_alignment Alignment to use when burnt-in subtitles are rendered.
 */
Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
	: _film (film)
	, _suspended (0)
	, _ignore_video(false)
	, _ignore_audio(false)
	, _ignore_text(false)
	, _always_burn_open_subtitles(false)
	, _fast(false)
	, _tolerant (film->tolerant())
	, _play_referenced(false)
	, _audio_merger (_film->audio_frame_rate())
	, _subtitle_alignment (subtitle_alignment)
{
	construct ();
}
113
114
/** Construct a Player which plays an arbitrary playlist rather than the film's own.
 *  @param film Film whose settings (frame rate, audio rate etc.) are used.
 *  @param playlist_ Playlist to play.
 *  NOTE(review): _subtitle_alignment is not set here, so it presumably takes a
 *  default from the class declaration — confirm against the header.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _ignore_video(false)
	, _ignore_audio(false)
	, _ignore_text(false)
	, _always_burn_open_subtitles(false)
	, _fast(false)
	, _tolerant (film->tolerant())
	, _play_referenced(false)
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
130
131
/** Shared second-stage construction: connect to film/playlist change signals,
 *  pick up the initial container size and audio processor, build the pieces
 *  and seek to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor, if it has one */
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}
148
149
150 bool
151 have_video (shared_ptr<const Content> content)
152 {
153         return static_cast<bool>(content->video) && content->video->use() && content->can_be_played();
154 }
155
156
157 bool
158 have_audio (shared_ptr<const Content> content)
159 {
160         return static_cast<bool>(content->audio) && content->can_be_played();
161 }
162
163
/** Rebuild the list of Pieces (content + decoder pairs) from the playlist,
 *  re-wiring all decoder output signals to our handlers and resetting
 *  playback state.  Decoders are re-used from the old pieces where possible.
 */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);

	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so their decoders can be re-used below */
	auto old_pieces = _pieces;
	_pieces.clear ();

	auto playlist_content = playlist()->content();
	bool const have_threed = std::any_of(
		playlist_content.begin(),
		playlist_content.end(),
		[](shared_ptr<const Content> c) {
			return c->video && (c->video->frame_type() == VideoFrameType::THREE_D_LEFT || c->video->frame_type() == VideoFrameType::THREE_D_RIGHT);
		});


	if (have_threed) {
		_shuffler.reset(new Shuffler());
		_shuffler->Video.connect(bind(&Player::video, this, _1, _2));
	}

	for (auto i: playlist()->content()) {

		if (!i->paths_valid ()) {
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Re-use this content's decoder from before the rebuild, if there was one */
		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}

		auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			for (auto i: decoder->text) {
				i->set_ignore (true);
			}
		}

		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(i, decoder, frc);
		_pieces.push_back (piece);

		if (decoder->video) {
			if (have_threed) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		auto j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	/* One StreamState per audio stream, starting at its content's position */
	_stream_states.clear ();
	for (auto i: _pieces) {
		if (i->content->audio) {
			for (auto j: i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* True for in-use 2D video; 3D L/R eye content is allowed to overlap */
	auto ignore_overlap = [](shared_ptr<VideoContent> v) {
		return v && v->use() && v->frame_type() != VideoFrameType::THREE_D_LEFT && v->frame_type() != VideoFrameType::THREE_D_RIGHT;
	};

	for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
		if (ignore_overlap((*i)->content->video)) {
			/* Look for content later in the content list with in-use video that overlaps this */
			auto const period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
			for (auto j = std::next(i); j != _pieces.end(); ++j) {
				if ((*j)->content->video && ignore_overlap((*j)->content->video)) {
					(*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
				}
			}
		}
	}

	/* Trackers for the gaps where we must emit black video / silent audio */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	_next_video_time = boost::none;
	_next_video_eyes = Eyes::BOTH;
	_next_audio_time = boost::none;
}
303
304
305 void
306 Player::playlist_content_change (ChangeType type, int property, bool frequent)
307 {
308         if (property == VideoContentProperty::CROP) {
309                 if (type == ChangeType::DONE) {
310                         boost::mutex::scoped_lock lm (_mutex);
311                         for (auto const& i: _delay) {
312                                 i.first->reset_metadata(_film, _video_container_size);
313                         }
314                 }
315         } else {
316                 if (type == ChangeType::PENDING) {
317                         /* The player content is probably about to change, so we can't carry on
318                            until that has happened and we've rebuilt our pieces.  Stop pass()
319                            and seek() from working until then.
320                         */
321                         ++_suspended;
322                 } else if (type == ChangeType::DONE) {
323                         /* A change in our content has gone through.  Re-build our pieces. */
324                         setup_pieces ();
325                         --_suspended;
326                 } else if (type == ChangeType::CANCELLED) {
327                         --_suspended;
328                 }
329         }
330
331         Change (type, property, frequent);
332 }
333
334
335 void
336 Player::set_video_container_size (dcp::Size s)
337 {
338         Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
339
340         if (s == _video_container_size) {
341                 Change(ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
342                 return;
343         }
344
345         _video_container_size = s;
346
347         {
348                 boost::mutex::scoped_lock lm(_black_image_mutex);
349                 _black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
350                 _black_image->make_black ();
351         }
352
353         Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
354 }
355
356
/** Handler for a change to the playlist itself: rebuild our pieces once the
 *  change has completed, then proxy the change to our own observers.
 */
void
Player::playlist_change (ChangeType type)
{
	if (type == ChangeType::DONE) {
		setup_pieces ();
	}
	Change (type, PlayerProperty::PLAYLIST, false);
}
365
366
367 void
368 Player::film_change (ChangeType type, Film::Property p)
369 {
370         /* Here we should notice Film properties that affect our output, and
371            alert listeners that our output now would be different to how it was
372            last time we were run.
373         */
374
375         if (p == Film::Property::CONTAINER) {
376                 Change (type, PlayerProperty::FILM_CONTAINER, false);
377         } else if (p == Film::Property::VIDEO_FRAME_RATE) {
378                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
379                    so we need new pieces here.
380                 */
381                 if (type == ChangeType::DONE) {
382                         setup_pieces ();
383                 }
384                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
385         } else if (p == Film::Property::AUDIO_PROCESSOR) {
386                 if (type == ChangeType::DONE && _film->audio_processor ()) {
387                         boost::mutex::scoped_lock lm (_mutex);
388                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
389                 }
390         } else if (p == Film::Property::AUDIO_CHANNELS) {
391                 if (type == ChangeType::DONE) {
392                         boost::mutex::scoped_lock lm (_mutex);
393                         _audio_merger.clear ();
394                 }
395         }
396 }
397
398
/** @param eyes Eye(s) that this frame should be treated as being for.
 *  @return A completely black frame at the current video container size,
 *  used to fill gaps in the video timeline.
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	boost::mutex::scoped_lock lm(_black_image_mutex);

	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		Crop(),
		optional<double>(),
		_video_container_size,
		_video_container_size,
		eyes,
		Part::WHOLE,
		PresetColourConversion::all().front().conversion,
		VideoRange::FULL,
		std::weak_ptr<Content>(),
		boost::optional<Frame>(),
		false
	);
}
419
420
/** Convert a DCP time to a video frame index within a piece of content,
 *  clamping to the content's trimmed length and accounting for trim-start
 *  and any frame skip/repeat.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
437
438
439 DCPTime
440 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
441 {
442         /* See comment in dcp_to_content_video */
443         auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
444         return d + piece->content->position();
445 }
446
447
/** Convert a DCP time to an audio frame count at the film's audio frame rate,
 *  within a piece of content, clamping to the content's trimmed length.
 */
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
}
456
457
458 DCPTime
459 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
460 {
461         /* See comment in dcp_to_content_video */
462         return DCPTime::from_frames (f, _film->audio_frame_rate())
463                 - DCPTime (piece->content->trim_start(), piece->frc)
464                 + piece->content->position();
465 }
466
467
/** Convert a DCP time to a ContentTime within a piece of content, clamping to
 *  the content's trimmed length and adding back the trim-start offset.
 */
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
475
476
477 DCPTime
478 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
479 {
480         return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
481 }
482
483
484 vector<shared_ptr<Font>>
485 Player::get_subtitle_fonts ()
486 {
487         boost::mutex::scoped_lock lm (_mutex);
488
489         vector<shared_ptr<Font>> fonts;
490         for (auto piece: _pieces) {
491                 for (auto text: piece->content->text) {
492                         auto text_fonts = text->fonts();
493                         copy (text_fonts.begin(), text_fonts.end(), back_inserter(fonts));
494                 }
495         }
496
497         return fonts;
498 }
499
500
/** Set this player never to produce any video data.
 *  Rebuilds the pieces so that video decoders are told to ignore their output.
 */
void
Player::set_ignore_video ()
{
	_ignore_video = true;
	setup_pieces();
}
508
509
/** Set this player never to produce any audio data.
 *  Rebuilds the pieces so that audio decoders are told to ignore their output.
 */
void
Player::set_ignore_audio ()
{
	_ignore_audio = true;
	setup_pieces();
}
516
517
/** Set this player never to produce any text (subtitle/caption) data.
 *  Rebuilds the pieces so that text decoders are told to ignore their output.
 */
void
Player::set_ignore_text ()
{
	_ignore_text = true;
	setup_pieces();
}
524
525
/** Set the player to always burn open texts into the image regardless of the content settings.
 *  No piece rebuild is needed: the flag is read when subtitles are rendered.
 */
void
Player::set_always_burn_open_subtitles ()
{
	_always_burn_open_subtitles = true;
}
532
533
/** Sets up the player to be faster, possibly at the expense of quality.
 *  Rebuilds the pieces so the flag is passed to the decoder factory.
 */
void
Player::set_fast ()
{
	_fast = true;
	setup_pieces();
}
541
542
/** Set the player to decode and play content from DCPs even when it is
 *  referenced (rather than skipped).  Rebuilds the pieces so that DCP
 *  decoders pick up the new setting.
 */
void
Player::set_play_referenced ()
{
	_play_referenced = true;
	setup_pieces();
}
549
550
/** Do one unit of work: make the decoder (or black/silence filler) that is
 *  furthest behind emit some data, then push out any audio that is known to
 *  be complete.
 *  @return true when there is nothing left to do (playback is finished).
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length.load() == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	for (auto i: _pieces) {
		if (i->done) {
			continue;
		}

		auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			/* This piece's decoder has gone past the end of its content */
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What kind of thing should emit data on this pass */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Black/silent gap fillers take over if they are earlier than any content */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
		earliest_content->done = earliest_content->decoder->pass ();
		auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_next_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_next_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _next_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_next_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.  First, though we choose only streams that are less than
	   ignore_streams_behind seconds behind the furthest ahead (we assume that if a stream has fallen that far
	   behind it has finished).  This is so that we don't withhold audio indefinitely awaiting data from a stream
	   that will never come, causing bugs like #2101.
	*/
	constexpr int ignore_streams_behind = 5;

	using state_pair = std::pair<AudioStreamPtr, StreamState>;

	/* Find the 'leading' stream (i.e. the one that pushed data most recently) */
	auto latest_last_push_end = std::max_element(
		_stream_states.begin(),
		_stream_states.end(),
		[](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; }
		);

	if (latest_last_push_end != _stream_states.end()) {
		LOG_DEBUG_PLAYER("Leading audio stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
	}

	/* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
	std::map<AudioStreamPtr, StreamState> alive_stream_states;
	for (auto const& i: _stream_states) {
		if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
			alive_stream_states.insert(i);
		} else {
			LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
		}
	}

	/* pull_to is the earliest point up to which every live stream has pushed data */
	auto pull_to = _playback_length.load();
	for (auto const& i: alive_stream_states) {
		if (!i.second.piece->done && i.second.last_push_end < pull_to) {
			pull_to = i.second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	auto audio = _audio_merger.pull (pull_to);
	for (auto i = audio.begin(); i != audio.end(); ++i) {
		if (_next_audio_time && i->second < *_next_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			auto cut = discard_audio (i->first, i->second, *_next_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_next_audio_time && i->second > *_next_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_next_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Flush anything still held back before finishing */
		if (_shuffler) {
			_shuffler->flush ();
		}
		for (auto const& i: _delay) {
			do_emit_video(i.first, i.second);
		}

		/* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
		 * However, if we have L and R video files, and one is shorter than the other,
		 * the fill code in ::video mostly takes care of filling in the gaps.
		 * However, since it fills at the point when it knows there is more video coming
		 * at time t (so it should fill any gap up to t) it can't do anything right at the
		 * end.  This is particularly bad news if the last frame emitted is a LEFT
		 * eye, as the MXF writer will complain about the 3D sequence being wrong.
		 * Here's a hack to workaround that particular case.
		 */
		if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
			do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
		}
	}

	return done;
}
752
753
/** @return Open subtitles for the frame at the given time, converted to images,
 *  merged into a single positioned image; or an empty optional if there are none.
 *  Only subtitles marked for burn-in are included, unless
 *  _always_burn_open_subtitles is set.
 */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	for (
		auto j:
		_active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		for (auto i: j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.load().width, i.rectangle.height * _video_container_size.load().height);

			/* Rectangle coordinates are proportions of the container size */
			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint(_video_container_size.load().width * i.rectangle.x),
						lrint(_video_container_size.load().height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty()) {
			auto s = render_text(j.string, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty()) {
		return {};
	}

	return merge (captions, _subtitle_alignment);
}
799
800
/** Handle a video frame emitted by a decoder.
 *
 *  Works out where the frame goes in the DCP, discards frames that are before
 *  the content's period or before the last accurate seek position, fills any
 *  gap between the last video we emitted and this frame (with repeats of the
 *  previous frame, or black), then emits the frame itself — possibly more than
 *  once if the frame-rate change requires repeats.
 *
 *  @param weak_piece Piece that the video came from.
 *  @param video Video frame from the decoder.
 */
void
Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
{
	/* Ignore anything that arrives while we are part-way through a change */
	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	/* Content may have video disabled entirely */
	if (!piece->content->video->use()) {
		return;
	}

	/* If the frame rate change means we should skip frames, drop every other one */
	FrameRateChange frc (_film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
		return;
	}

	/* Some periods of video may be explicitly ignored (e.g. reels not being played from a DCP) */
	if (piece->ignore_video && piece->ignore_video->contains(time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_next_video_time) {
		DCPTime fill_from = max (*_next_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			auto last = _last_video.find (weak_piece);
			if (_film->three_d()) {
				/* In 3D we must fill eye-by-eye, tracking which eye each emitted
				   frame is for as well as its time.
				*/
				auto fill_to_eyes = video.eyes;
				if (fill_to_eyes == Eyes::BOTH) {
					fill_to_eyes = Eyes::LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = Eyes::LEFT;
				}
				auto j = fill_from;
				auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
				if (eyes == Eyes::BOTH) {
					eyes = Eyes::LEFT;
				}
				/* Loop over (time, eye) pairs until we reach both the fill-to time
				   and the fill-to eye.
				*/
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						auto copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					/* Time only advances after the RIGHT eye of each frame */
					if (eyes == Eyes::RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: fill frame-by-frame with the last frame we saw, or black */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(Eyes::BOTH), j);
					}
				}
			}
		}
	}

	auto const content_video = piece->content->video;

	/* Remember this frame so we can repeat it to fill any future gap from this piece */
	_last_video[weak_piece] = std::make_shared<PlayerVideo>(
		video.image,
		content_video->actual_crop(),
		content_video->fade (_film, video.frame),
		scale_for_display(
			content_video->scaled_size(_film->frame_size()),
			_video_container_size,
			_film->frame_size(),
			content_video->pixel_quanta()
			),
		_video_container_size,
		video.eyes,
		video.part,
		content_video->colour_conversion(),
		content_video->range(),
		piece->content,
		video.frame,
		false
		);

	/* Emit the frame, repeated as many times as the frame rate change demands,
	   but never past the end of the content.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[weak_piece], t);
		}
		t += one_video_frame ();
	}
}
920
921
/** Handle a block of audio emitted by a decoder.
 *
 *  Trims the block so that it lies within the content's period, applies the
 *  content's gain and any fade, remaps the channels to the DCP layout, runs
 *  the optional audio processor and pushes the result into the merger.
 *
 *  @param weak_piece Piece that the audio came from.
 *  @param stream Stream within the piece that the audio belongs to.
 *  @param content_audio Audio data and its frame position within the content.
 */
void
Player::audio (weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio content_audio)
{
	/* Ignore anything that arrives while we are part-way through a change */
	if (_suspended) {
		return;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	auto content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Frame rate of this stream after resampling to suit the film */
	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	auto time = resampled_audio_to_dcp (piece, content_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		auto cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* Block overlaps the end of the content; keep only the part before the end */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain and fade */

	auto const fade_coeffs = content->fade (stream, content_audio.frame, content_audio.audio->frames(), rfr);
	if (content->gain() != 0 || !fade_coeffs.empty()) {
		/* Work on a copy so that we don't modify the decoder's buffers */
		auto gain_buffers = make_shared<AudioBuffers>(content_audio.audio);
		if (!fade_coeffs.empty()) {
			/* Apply both fade and gain */
			DCPOMATIC_ASSERT (fade_coeffs.size() == static_cast<size_t>(gain_buffers->frames()));
			auto const channels = gain_buffers->channels();
			auto const frames = fade_coeffs.size();
			auto data = gain_buffers->data();
			auto const gain = db_to_linear (content->gain());
			for (auto channel = 0; channel < channels; ++channel) {
				for (auto frame = 0U; frame < frames; ++frame) {
					data[channel][frame] *= gain * fade_coeffs[frame];
				}
			}
		} else {
			/* Just apply gain */
			gain_buffers->apply_gain (content->gain());
		}
		content_audio.audio = gain_buffers;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	/* Remember how far this stream has got, so pass() can tell when it is safe to pull from the merger */
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
1010
1011
1012 void
1013 Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentBitmapText subtitle)
1014 {
1015         if (_suspended) {
1016                 return;
1017         }
1018
1019         auto piece = weak_piece.lock ();
1020         auto content = weak_content.lock ();
1021         if (!piece || !content) {
1022                 return;
1023         }
1024
1025         PlayerText ps;
1026         for (auto& sub: subtitle.subs)
1027         {
1028                 /* Apply content's subtitle offsets */
1029                 sub.rectangle.x += content->x_offset ();
1030                 sub.rectangle.y += content->y_offset ();
1031
1032                 /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
1033                 sub.rectangle.x -= sub.rectangle.width * ((content->x_scale() - 1) / 2);
1034                 sub.rectangle.y -= sub.rectangle.height * ((content->y_scale() - 1) / 2);
1035
1036                 /* Apply content's subtitle scale */
1037                 sub.rectangle.width *= content->x_scale ();
1038                 sub.rectangle.height *= content->y_scale ();
1039
1040                 auto image = sub.image;
1041
1042                 /* We will scale the subtitle up to fit _video_container_size */
1043                 int const width = sub.rectangle.width * _video_container_size.load().width;
1044                 int const height = sub.rectangle.height * _video_container_size.load().height;
1045                 if (width == 0 || height == 0) {
1046                         return;
1047                 }
1048
1049                 dcp::Size scaled_size (width, height);
1050                 ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), sub.rectangle));
1051         }
1052
1053         DCPTime from(content_time_to_dcp(piece, subtitle.from()));
1054         _active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
1055 }
1056
1057
/** Handle the start of a string (text-based) subtitle/caption emitted by a decoder.
 *
 *  Applies the content's position offsets, scale and aspect adjustments to each
 *  subtitle and registers the resulting PlayerText as active from the
 *  subtitle's start time.
 *
 *  @param weak_piece Piece that the subtitle came from.
 *  @param weak_content TextContent providing offset/scale settings.
 *  @param subtitle The string subtitle(s) and their start time.
 */
void
Player::plain_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentStringText subtitle)
{
	/* Ignore anything that arrives while we are part-way through a change */
	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	auto content = weak_content.lock ();
	if (!piece || !content) {
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	/* Subtitles starting after the content's end are dropped entirely */
	if (from > piece->content->end(_film)) {
		return;
	}

	for (auto s: subtitle.subs) {
		s.set_h_position (s.h_position() + content->x_offset());
		s.set_v_position (s.v_position() + content->y_offset());
		float const xs = content->x_scale();
		float const ys = content->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we scale size by 2.
		*/
		/* NOTE: 1 / min(1/xs, 1/ys) is mathematically max(xs, ys); kept in this
		   form to preserve the exact floating-point result.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* Times are relative to the DCP, with millisecond precision */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (s);
	}

	_active_texts[static_cast<int>(content->type())].add_from(weak_content, ps, from);
}
1104
1105
1106 void
1107 Player::subtitle_stop (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentTime to)
1108 {
1109         if (_suspended) {
1110                 return;
1111         }
1112
1113         auto content = weak_content.lock ();
1114         if (!content) {
1115                 return;
1116         }
1117
1118         if (!_active_texts[static_cast<int>(content->type())].have(weak_content)) {
1119                 return;
1120         }
1121
1122         auto piece = weak_piece.lock ();
1123         if (!piece) {
1124                 return;
1125         }
1126
1127         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1128
1129         if (dcp_to > piece->content->end(_film)) {
1130                 return;
1131         }
1132
1133         auto from = _active_texts[static_cast<int>(content->type())].add_to(weak_content, dcp_to);
1134
1135         bool const always = (content->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1136         if (content->use() && !always && !content->burn()) {
1137                 Text (from.first, content->type(), content->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod(from.second, dcp_to));
1138         }
1139 }
1140
1141
/** Seek the player to a new position.
 *
 *  Clears all buffered state (shuffler, video delay, audio processor, merger,
 *  active texts), seeks every piece's decoder, then resets the black/silence
 *  generators and next-time trackers.
 *
 *  @param time Position to seek to.
 *  @param accurate true to try to start exactly at @p time; false to start at
 *  the nearest convenient point at or before it.
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	/* Discard any 3D frames waiting to be paired up */
	if (_shuffler) {
		_shuffler->clear ();
	}

	/* Discard any video frames waiting in the emit delay */
	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	/* Discard any buffered audio and active subtitles/captions */
	_audio_merger.clear ();
	for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
		_active_texts[i].clear ();
	}

	for (auto i: _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* We know exactly where the next video/audio should be, so anything
		   earlier that the decoders produce can be discarded.
		*/
		_next_video_time = time;
		_next_video_eyes = Eyes::LEFT;
		_next_audio_time = time;
	} else {
		/* Inaccurate seek: accept whatever the decoders give us first */
		_next_video_time = boost::none;
		_next_video_eyes = boost::none;
		_next_audio_time = boost::none;
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Frames remembered for gap-filling are no longer valid */
	_last_video.clear ();
}
1202
1203
1204 void
1205 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1206 {
1207         if (!_film->three_d()) {
1208                 if (pv->eyes() == Eyes::LEFT) {
1209                         /* Use left-eye images for both eyes... */
1210                         pv->set_eyes (Eyes::BOTH);
1211                 } else if (pv->eyes() == Eyes::RIGHT) {
1212                         /* ...and discard the right */
1213                         return;
1214                 }
1215         }
1216
1217         /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
1218            player before the video that requires them.
1219         */
1220         _delay.push_back (make_pair (pv, time));
1221
1222         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1223                 _next_video_time = time + one_video_frame();
1224         }
1225         _next_video_eyes = increment_eyes (pv->eyes());
1226
1227         if (_delay.size() < 3) {
1228                 return;
1229         }
1230
1231         auto to_do = _delay.front();
1232         _delay.pop_front();
1233         do_emit_video (to_do.first, to_do.second);
1234 }
1235
1236
1237 void
1238 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1239 {
1240         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1241                 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1242                         _active_texts[i].clear_before (time);
1243                 }
1244         }
1245
1246         auto subtitles = open_subtitles_for_frame (time);
1247         if (subtitles) {
1248                 pv->set_text (subtitles.get ());
1249         }
1250
1251         Video (pv, time);
1252 }
1253
1254
1255 void
1256 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1257 {
1258         /* Log if the assert below is about to fail */
1259         if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) {
1260                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
1261         }
1262
1263         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1264         DCPOMATIC_ASSERT (!_next_audio_time || labs(time.get() - _next_audio_time->get()) < 2);
1265         Audio (data, time, _film->audio_frame_rate());
1266         _next_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1267 }
1268
1269
1270 void
1271 Player::fill_audio (DCPTimePeriod period)
1272 {
1273         if (period.from == period.to) {
1274                 return;
1275         }
1276
1277         DCPOMATIC_ASSERT (period.from < period.to);
1278
1279         DCPTime t = period.from;
1280         while (t < period.to) {
1281                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1282                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1283                 if (samples) {
1284                         auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1285                         silence->make_silent ();
1286                         emit_audio (silence, t);
1287                 }
1288                 t += block;
1289         }
1290 }
1291
1292
1293 DCPTime
1294 Player::one_video_frame () const
1295 {
1296         return DCPTime::from_frames (1, _film->video_frame_rate ());
1297 }
1298
1299
1300 pair<shared_ptr<AudioBuffers>, DCPTime>
1301 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1302 {
1303         auto const discard_time = discard_to - time;
1304         auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1305         auto remaining_frames = audio->frames() - discard_frames;
1306         if (remaining_frames <= 0) {
1307                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1308         }
1309         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1310         return make_pair(cut, time + discard_time);
1311 }
1312
1313
1314 void
1315 Player::set_dcp_decode_reduction (optional<int> reduction)
1316 {
1317         Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1318
1319         if (reduction == _dcp_decode_reduction.load()) {
1320                 Change(ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1321                 return;
1322         }
1323
1324         _dcp_decode_reduction = reduction;
1325         setup_pieces();
1326
1327         Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1328 }
1329
1330
1331 optional<DCPTime>
1332 Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t) const
1333 {
1334         boost::mutex::scoped_lock lm (_mutex);
1335
1336         for (auto i: _pieces) {
1337                 if (i->content == content) {
1338                         return content_time_to_dcp (i, t);
1339                 }
1340         }
1341
1342         /* We couldn't find this content; perhaps things are being changed over */
1343         return {};
1344 }
1345
1346
1347 optional<ContentTime>
1348 Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t) const
1349 {
1350         boost::mutex::scoped_lock lm (_mutex);
1351
1352         for (auto i: _pieces) {
1353                 if (i->content == content) {
1354                         return dcp_to_content_time (i, t);
1355                 }
1356         }
1357
1358         /* We couldn't find this content; perhaps things are being changed over */
1359         return {};
1360 }
1361
1362
1363 shared_ptr<const Playlist>
1364 Player::playlist () const
1365 {
1366         return _playlist ? _playlist : _film->playlist();
1367 }
1368
1369
1370 void
1371 Player::atmos (weak_ptr<Piece> weak_piece, ContentAtmos data)
1372 {
1373         if (_suspended) {
1374                 return;
1375         }
1376
1377         auto piece = weak_piece.lock ();
1378         DCPOMATIC_ASSERT (piece);
1379
1380         auto const vfr = _film->video_frame_rate();
1381
1382         DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr));
1383         if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(_film))) {
1384                 return;
1385         }
1386
1387         Atmos (data.data, dcp_time, data.metadata);
1388 }
1389