Change how video timing is done.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "audio_buffers.h"
24 #include "audio_content.h"
25 #include "audio_decoder.h"
26 #include "audio_processor.h"
27 #include "compose.hpp"
28 #include "config.h"
29 #include "content_audio.h"
30 #include "content_video.h"
31 #include "dcp_content.h"
32 #include "dcp_decoder.h"
33 #include "dcpomatic_log.h"
34 #include "decoder.h"
35 #include "decoder_factory.h"
36 #include "ffmpeg_content.h"
37 #include "film.h"
38 #include "frame_rate_change.h"
39 #include "image.h"
40 #include "image_decoder.h"
41 #include "job.h"
42 #include "log.h"
43 #include "maths_util.h"
44 #include "piece.h"
45 #include "player.h"
46 #include "player_video.h"
47 #include "playlist.h"
48 #include "ratio.h"
49 #include "raw_image_proxy.h"
50 #include "render_text.h"
51 #include "shuffler.h"
52 #include "text_content.h"
53 #include "text_decoder.h"
54 #include "timer.h"
55 #include "video_decoder.h"
56 #include <dcp/reel.h>
57 #include <dcp/reel_closed_caption_asset.h>
58 #include <dcp/reel_picture_asset.h>
59 #include <dcp/reel_sound_asset.h>
60 #include <dcp/reel_subtitle_asset.h>
61 #include <algorithm>
62 #include <iostream>
63 #include <stdint.h>
64
65 #include "i18n.h"
66
67
68 using std::copy;
69 using std::cout;
70 using std::dynamic_pointer_cast;
71 using std::list;
72 using std::make_pair;
73 using std::make_shared;
74 using std::max;
75 using std::min;
76 using std::pair;
77 using std::shared_ptr;
78 using std::vector;
79 using std::weak_ptr;
80 using boost::optional;
81 using boost::scoped_ptr;
82 #if BOOST_VERSION >= 106100
83 using namespace boost::placeholders;
84 #endif
85 using namespace dcpomatic;
86
87
/* Property codes emitted with the Player's Change signal so that listeners
   can tell which aspect of the player's output has changed.  Values are
   arbitrary but must not collide with other property enumerations used on
   the same signal.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
94
95
/** Construct a Player which plays the film's own playlist.
 *  @param film Film to play.
 *  @param subtitle_alignment Alignment to use when positioning burnt-in subtitle images.
 */
Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
	: _film (film)
	, _suspended (0)
	, _ignore_video(false)
	, _ignore_audio(false)
	, _ignore_text(false)
	, _always_burn_open_subtitles(false)
	, _fast(false)
	, _tolerant (film->tolerant())
	, _play_referenced(false)
	, _audio_merger (_film->audio_frame_rate())
	, _subtitle_alignment (subtitle_alignment)
{
	construct ();
}
111
112
/** Construct a Player which plays a given playlist rather than the film's own.
 *  @param film Film that the playlist's content belongs to.
 *  @param playlist_ Playlist to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _ignore_video(false)
	, _ignore_audio(false)
	, _ignore_text(false)
	, _always_burn_open_subtitles(false)
	, _fast(false)
	, _tolerant (film->tolerant())
	, _play_referenced(false)
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
128
129
/** Shared second-stage construction: connect to film/playlist change signals,
 *  set up initial state and seek to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	/* Start with the film's frame size; may be changed later via set_video_container_size() */
	set_video_container_size (_film->frame_size ());

	/* Pick up any audio processor the film already has configured */
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}
146
147
148 bool
149 have_video (shared_ptr<const Content> content)
150 {
151         return static_cast<bool>(content->video) && content->video->use() && content->can_be_played();
152 }
153
154
155 bool
156 have_audio (shared_ptr<const Content> content)
157 {
158         return static_cast<bool>(content->audio) && content->can_be_played();
159 }
160
161
/** Rebuild the list of Pieces (content + decoder pairs) from the playlist.
 *  Called whenever something changes that invalidates our current decoders
 *  (content changes, frame rate changes, ignore flags, etc.).  Takes _mutex.
 */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);

	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so that decoders can be re-used where the content is unchanged */
	auto old_pieces = _pieces;
	_pieces.clear ();

	auto playlist_content = playlist()->content();
	bool const have_threed = std::any_of(
		playlist_content.begin(),
		playlist_content.end(),
		[](shared_ptr<const Content> c) {
			return c->video && (c->video->frame_type() == VideoFrameType::THREE_D_LEFT || c->video->frame_type() == VideoFrameType::THREE_D_RIGHT);
		});


	if (have_threed) {
		_shuffler.reset(new Shuffler());
		_shuffler->Video.connect(bind(&Player::video, this, _1, _2));
	}

	for (auto i: playlist()->content()) {

		if (!i->paths_valid ()) {
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Re-use an existing decoder for this content if we have one */
		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}

		auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			for (auto i: decoder->text) {
				i->set_ignore (true);
			}
		}

		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(i, decoder, frc);
		_pieces.push_back (piece);

		if (decoder->video) {
			if (have_threed) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		auto j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	/* Record the starting state of each audio stream so that pass() can track progress */
	_stream_states.clear ();
	for (auto i: _pieces) {
		if (i->content->audio) {
			for (auto j: i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* 2D content whose video is used can be hidden by later overlapping content;
	   3D L/R content is excluded from this as both eyes are needed.
	*/
	auto ignore_overlap = [](shared_ptr<VideoContent> v) {
		return v && v->use() && v->frame_type() != VideoFrameType::THREE_D_LEFT && v->frame_type() != VideoFrameType::THREE_D_RIGHT;
	};

	for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
		if (ignore_overlap((*i)->content->video)) {
			/* Look for content later in the content list with in-use video that overlaps this */
			auto const period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
			for (auto j = std::next(i); j != _pieces.end(); ++j) {
				if ((*j)->content->video && ignore_overlap((*j)->content->video)) {
					(*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
				}
			}
		}
	}

	/* Periods with no video get black; periods with no audio get silence */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	_next_video_time = boost::none;
	_next_audio_time = boost::none;
}
300
301
/** Handler for a change to some content in the playlist.
 *  @param type Stage of the change (PENDING / DONE / CANCELLED).
 *  @param property Property of the content that changed.
 *  @param frequent true if this change is happening often (e.g. during a drag).
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (property == VideoContentProperty::CROP) {
		/* Crop changes do not require a rebuild of the pieces; just update
		   the metadata of the video frames we are holding in _delay.
		*/
		if (type == ChangeType::DONE) {
			boost::mutex::scoped_lock lm (_mutex);
			for (auto const& i: _delay) {
				i.first->reset_metadata(_film, _video_container_size);
			}
		}
	} else {
		if (type == ChangeType::PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces.  Stop pass()
			   and seek() from working until then.
			*/
			++_suspended;
		} else if (type == ChangeType::DONE) {
			/* A change in our content has gone through.  Re-build our pieces. */
			setup_pieces ();
			--_suspended;
		} else if (type == ChangeType::CANCELLED) {
			--_suspended;
		}
	}

	/* Pass the change on to our own listeners (e.g. the butler) */
	Change (type, property, frequent);
}
330
331
/** Set the size of the 'container' into which video will be scaled/padded.
 *  Emits the usual PENDING / DONE (or CANCELLED if nothing changed) sequence
 *  on the Change signal.
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	if (s == _video_container_size) {
		/* No-op; tell listeners the pending change came to nothing */
		Change(ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
		return;
	}

	_video_container_size = s;

	{
		/* Rebuild the cached black frame at the new size */
		boost::mutex::scoped_lock lm(_black_image_mutex);
		_black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
		_black_image->make_black ();
	}

	Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
352
353
354 void
355 Player::playlist_change (ChangeType type)
356 {
357         if (type == ChangeType::DONE) {
358                 setup_pieces ();
359         }
360         Change (type, PlayerProperty::PLAYLIST, false);
361 }
362
363
364 void
365 Player::film_change (ChangeType type, Film::Property p)
366 {
367         /* Here we should notice Film properties that affect our output, and
368            alert listeners that our output now would be different to how it was
369            last time we were run.
370         */
371
372         if (p == Film::Property::CONTAINER) {
373                 Change (type, PlayerProperty::FILM_CONTAINER, false);
374         } else if (p == Film::Property::VIDEO_FRAME_RATE) {
375                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
376                    so we need new pieces here.
377                 */
378                 if (type == ChangeType::DONE) {
379                         setup_pieces ();
380                 }
381                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
382         } else if (p == Film::Property::AUDIO_PROCESSOR) {
383                 if (type == ChangeType::DONE && _film->audio_processor ()) {
384                         boost::mutex::scoped_lock lm (_mutex);
385                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
386                 }
387         } else if (p == Film::Property::AUDIO_CHANNELS) {
388                 if (type == ChangeType::DONE) {
389                         boost::mutex::scoped_lock lm (_mutex);
390                         _audio_merger.clear ();
391                 }
392         }
393 }
394
395
396 shared_ptr<PlayerVideo>
397 Player::black_player_video_frame (Eyes eyes) const
398 {
399         boost::mutex::scoped_lock lm(_black_image_mutex);
400
401         return std::make_shared<PlayerVideo> (
402                 make_shared<const RawImageProxy>(_black_image),
403                 Crop(),
404                 optional<double>(),
405                 _video_container_size,
406                 _video_container_size,
407                 eyes,
408                 Part::WHOLE,
409                 PresetColourConversion::all().front().conversion,
410                 VideoRange::FULL,
411                 std::weak_ptr<Content>(),
412                 boost::optional<dcpomatic::ContentTime>(),
413                 false
414         );
415 }
416
417
418 Frame
419 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
420 {
421         auto s = t - piece->content->position ();
422         s = min (piece->content->length_after_trim(_film), s);
423         s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
424
425         /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
426            then convert that ContentTime to frames at the content's rate.  However this fails for
427            situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
428            enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
429
430            Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
431         */
432         return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
433 }
434
435
436 DCPTime
437 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
438 {
439         /* See comment in dcp_to_content_video */
440         auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
441         return d + piece->content->position();
442 }
443
444
445 Frame
446 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
447 {
448         auto s = t - piece->content->position ();
449         s = min (piece->content->length_after_trim(_film), s);
450         /* See notes in dcp_to_content_video */
451         return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
452 }
453
454
455 DCPTime
456 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
457 {
458         /* See comment in dcp_to_content_video */
459         return DCPTime::from_frames (f, _film->audio_frame_rate())
460                 - DCPTime (piece->content->trim_start(), piece->frc)
461                 + piece->content->position();
462 }
463
464
465 ContentTime
466 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
467 {
468         auto s = t - piece->content->position ();
469         s = min (piece->content->length_after_trim(_film), s);
470         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
471 }
472
473
474 DCPTime
475 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
476 {
477         return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
478 }
479
480
481 vector<shared_ptr<Font>>
482 Player::get_subtitle_fonts ()
483 {
484         boost::mutex::scoped_lock lm (_mutex);
485
486         vector<shared_ptr<Font>> fonts;
487         for (auto piece: _pieces) {
488                 for (auto text: piece->content->text) {
489                         auto text_fonts = text->fonts();
490                         copy (text_fonts.begin(), text_fonts.end(), back_inserter(fonts));
491                 }
492         }
493
494         return fonts;
495 }
496
497
/** Set this player never to produce any video data.
 *  Rebuilds the pieces so decoders are told to skip video.
 */
void
Player::set_ignore_video ()
{
	_ignore_video = true;
	setup_pieces();
}
505
506
/** Set this player never to produce any audio data.
 *  Rebuilds the pieces so decoders are told to skip audio.
 */
void
Player::set_ignore_audio ()
{
	_ignore_audio = true;
	setup_pieces();
}
513
514
/** Set this player never to produce any text (subtitle/caption) data.
 *  Rebuilds the pieces so decoders are told to skip text.
 */
void
Player::set_ignore_text ()
{
	_ignore_text = true;
	setup_pieces();
}
521
522
/** Set the player to always burn open texts into the image regardless of the content settings.
 *  No rebuild of pieces is needed; the flag is consulted when rendering.
 */
void
Player::set_always_burn_open_subtitles ()
{
	_always_burn_open_subtitles = true;
}
529
530
/** Sets up the player to be faster, possibly at the expense of quality.
 *  Rebuilds the pieces so decoders are created with the fast flag.
 */
void
Player::set_fast ()
{
	_fast = true;
	setup_pieces();
}
538
539
/** Set the player to decode and play content from referenced DCPs, rather
 *  than skipping over it.  Rebuilds the pieces so the DCP decoders pick
 *  this setting up.
 */
void
Player::set_play_referenced ()
{
	_play_referenced = true;
	setup_pieces();
}
546
547
/** Make the most-behind decoder (or black/silence filler) emit some data,
 *  then push out any audio that is known to be complete.
 *  @return true if playback has finished (nothing more to emit), false otherwise.
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length.load() == DCPTime()) {
		/* Special; just give one black frame */
		use_video(black_player_video_frame(Eyes::BOTH), DCPTime(), one_video_frame());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	for (auto i: _pieces) {
		if (i->done) {
			continue;
		}

		/* Decoder position in DCP time, never before the start trim */
		auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What to emit on this pass: real content, filler black/silence, or nothing */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
		earliest_content->done = earliest_content->decoder->pass ();
		auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced) {
			if (dcp->reference_video()) {
				_next_video_time = dcp->end(_film);
			}
			if (dcp->reference_audio()) {
				/* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
				   to `hide' the fact that no audio was emitted during the referenced DCP (though
				   we need to behave as though it was).
				*/
				_next_audio_time = dcp->end(_film);
			}
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		use_video(black_player_video_frame(Eyes::BOTH), _black.position(), _black.period_at_position().to);
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_next_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _next_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_next_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.  First, though we choose only streams that are less than
	   ignore_streams_behind seconds behind the furthest ahead (we assume that if a stream has fallen that far
	   behind it has finished).  This is so that we don't withhold audio indefinitely awaiting data from a stream
	   that will never come, causing bugs like #2101.
	*/
	constexpr int ignore_streams_behind = 5;

	using state_pair = std::pair<AudioStreamPtr, StreamState>;

	/* Find the 'leading' stream (i.e. the one that pushed data most recently) */
	auto latest_last_push_end = std::max_element(
		_stream_states.begin(),
		_stream_states.end(),
		[](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; }
		);

	if (latest_last_push_end != _stream_states.end()) {
		LOG_DEBUG_PLAYER("Leading audio stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
	}

	/* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
	std::map<AudioStreamPtr, StreamState> alive_stream_states;
	for (auto const& i: _stream_states) {
		if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
			alive_stream_states.insert(i);
		} else {
			LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
		}
	}

	auto pull_to = _playback_length.load();
	for (auto const& i: alive_stream_states) {
		if (!i.second.piece->done && i.second.last_push_end < pull_to) {
			pull_to = i.second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	auto audio = _audio_merger.pull (pull_to);
	for (auto i = audio.begin(); i != audio.end(); ++i) {
		if (_next_audio_time && i->second < *_next_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			auto cut = discard_audio (i->first, i->second, *_next_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_next_audio_time && i->second > *_next_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_next_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Playback has finished: flush everything that is still buffered */
		emit_video_until(_film->length());

		if (_shuffler) {
			_shuffler->flush ();
		}
		for (auto const& i: _delay) {
			emit_video(i.first, i.second);
		}
	}

	return done;
}
743
744
/** @return Open subtitles for the frame at the given time, converted to images,
 *  merged into a single PositionImage; or an empty optional if there are none.
 *  Only subtitles that should be burnt in (or all of them, if
 *  _always_burn_open_subtitles is set) are included.
 */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	for (
		auto j:
		_active_texts[TextType::OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		for (auto i: j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.load().width, i.rectangle.height * _video_container_size.load().height);

			/* Rectangle coordinates are proportions of the container size */
			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint(_video_container_size.load().width * i.rectangle.x),
						lrint(_video_container_size.load().height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty()) {
			auto s = render_text(j.string, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty()) {
		return {};
	}

	return merge (captions, _subtitle_alignment);
}
790
791
/** Emit video frames (via the _delay queue) until the given DCP time,
 *  choosing for each instant between the most recent left/right pair,
 *  the most recent 2D frame, or black if nothing recent enough is held.
 *  @param time Time to emit up to (exclusive).
 */
void
Player::emit_video_until(DCPTime time)
{
	/* Queue one frame for emission, advancing _next_video_time once both
	   eyes (or a BOTH frame) have been queued for the current instant.
	*/
	auto frame = [this](shared_ptr<PlayerVideo> pv, DCPTime time) {
		/* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
		   player before the video that requires them.
		*/
		_delay.push_back(make_pair(pv, time));

		if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
			_next_video_time = time + one_video_frame();
		}

		if (_delay.size() < 3) {
			return;
		}

		auto to_do = _delay.front();
		_delay.pop_front();
		emit_video(to_do.first, to_do.second);
	};

	/* Frames older than this (relative to the DCP time being filled) are considered stale */
	auto const age_threshold = one_video_frame() * 2;

	while (_next_video_time.get_value_or({}) < time) {
		auto left = _last_video[Eyes::LEFT];
		auto right = _last_video[Eyes::RIGHT];
		auto both = _last_video[Eyes::BOTH];

		auto const next = _next_video_time.get_value_or({});

		if (
			left.first &&
			right.first &&
			(!both.first || (left.second >= both.second && right.second >= both.second)) &&
			(left.second - next) < age_threshold &&
			(right.second - next) < age_threshold
		   ) {
			/* Fresh 3D pair available (and newer than any 2D frame): emit both eyes */
			frame(left.first, next);
			frame(right.first, next);
		} else if (both.first && (both.second - next) < age_threshold) {
			/* Fresh 2D frame available */
			frame(both.first, next);
			LOG_DEBUG_PLAYER("Content %1 selected for DCP %2 (age %3)", to_string(both.second), to_string(next), to_string(both.second - next));
		} else {
			/* Nothing recent enough: emit black */
			frame(black_player_video_frame(Eyes::BOTH), next);
			LOG_DEBUG_PLAYER("Black selected for DCP %1", to_string(next));
		}
	}
}
841
842
843 void
844 Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
845 {
846         if (_suspended) {
847                 return;
848         }
849
850         auto piece = weak_piece.lock ();
851         if (!piece) {
852                 return;
853         }
854
855         if (!piece->content->video->use()) {
856                 return;
857         }
858
859         auto const three_d = _film->three_d();
860
861         if (!three_d) {
862                 if (video.eyes == Eyes::LEFT) {
863                         /* Use left-eye images for both eyes... */
864                         video.eyes = Eyes::BOTH;
865                 } else if (video.eyes == Eyes::RIGHT) {
866                         /* ...and discard the right */
867                         return;
868                 }
869         }
870
871         FrameRateChange frc (_film, piece->content);
872
873         /* Time of the frame we just received within the DCP */
874         auto const time = content_time_to_dcp(piece, video.time);
875         LOG_DEBUG_PLAYER("Received video frame %1 %2 eyes %3", to_string(video.time), to_string(time), static_cast<int>(video.eyes));
876
877         if (time < piece->content->position()) {
878                 return;
879         }
880
881         if (piece->ignore_video && piece->ignore_video->contains(time)) {
882                 return;
883         }
884
885         if (!_next_video_time) {
886                 _next_video_time = time.round(_film->video_frame_rate());
887         }
888
889         auto const content_video = piece->content->video;
890         use_video(
891                 std::make_shared<PlayerVideo>(
892                         video.image,
893                         content_video->actual_crop(),
894                         content_video->fade(_film, video.time),
895                         scale_for_display(
896                                 content_video->scaled_size(_film->frame_size()),
897                                 _video_container_size,
898                                 _film->frame_size(),
899                                 content_video->pixel_quanta()
900                                 ),
901                         _video_container_size,
902                         video.eyes,
903                         video.part,
904                         content_video->colour_conversion(),
905                         content_video->range(),
906                         piece->content,
907                         video.time,
908                         false
909                         ),
910                         time,
911                         piece->content->end(_film)
912                                 );
913 }
914
915
916 void
917 Player::use_video(shared_ptr<PlayerVideo> pv, DCPTime time, DCPTime end)
918 {
919         _last_video[pv->eyes()] = { pv, time };
920         if (pv->eyes() != Eyes::LEFT) {
921                 emit_video_until(std::min(time + one_video_frame() / 2, end));
922         }
923 }
924
925
/** Handle a block of audio arriving from a decoder: trim it to the content's
 *  period within the DCP, apply gain and fade, remap channels, run any audio
 *  processor and push the result into the audio merger.
 *
 *  @param weak_piece Piece the audio came from.
 *  @param stream Stream within the piece.
 *  @param content_audio Audio data and its frame index within the content.
 */
void
Player::audio (weak_ptr<Piece> weak_piece, AudioStreamPtr stream, ContentAudio content_audio)
{
	if (_suspended) {
		return;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	auto piece = weak_piece.lock ();
	if (!piece) {
		return;
	}

	auto content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Frame rate after resampling to the film's audio rate */
	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	auto time = resampled_audio_to_dcp (piece, content_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		auto cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* Block overlaps the end of the content: keep only the part inside */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain and fade */

	auto const fade_coeffs = content->fade (stream, content_audio.frame, content_audio.audio->frames(), rfr);
	if (content->gain() != 0 || !fade_coeffs.empty()) {
		/* Copy the buffers before modifying them */
		auto gain_buffers = make_shared<AudioBuffers>(content_audio.audio);
		if (!fade_coeffs.empty()) {
			/* Apply both fade and gain */
			DCPOMATIC_ASSERT (fade_coeffs.size() == static_cast<size_t>(gain_buffers->frames()));
			auto const channels = gain_buffers->channels();
			auto const frames = fade_coeffs.size();
			auto data = gain_buffers->data();
			auto const gain = db_to_linear (content->gain());
			for (auto channel = 0; channel < channels; ++channel) {
				for (auto frame = 0U; frame < frames; ++frame) {
					data[channel][frame] *= gain * fade_coeffs[frame];
				}
			}
		} else {
			/* Just apply gain */
			gain_buffers->apply_gain (content->gain());
		}
		content_audio.audio = gain_buffers;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	/* Record how far this stream has been pushed, for pass()'s bookkeeping */
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
1014
1015
/** Handle the start of a bitmap subtitle from a decoder: apply the content's
 *  offset/scale settings, scale the image to the container size and add the
 *  result to the appropriate active-texts list.
 */
void
Player::bitmap_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentBitmapText subtitle)
{
	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	auto content = weak_content.lock ();
	if (!piece || !content) {
		return;
	}

	PlayerText ps;
	for (auto& sub: subtitle.subs)
	{
		/* Apply content's subtitle offsets */
		sub.rectangle.x += content->x_offset ();
		sub.rectangle.y += content->y_offset ();

		/* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
		sub.rectangle.x -= sub.rectangle.width * ((content->x_scale() - 1) / 2);
		sub.rectangle.y -= sub.rectangle.height * ((content->y_scale() - 1) / 2);

		/* Apply content's subtitle scale */
		sub.rectangle.width *= content->x_scale ();
		sub.rectangle.height *= content->y_scale ();

		auto image = sub.image;

		/* We will scale the subtitle up to fit _video_container_size */
		int const width = sub.rectangle.width * _video_container_size.load().width;
		int const height = sub.rectangle.height * _video_container_size.load().height;
		if (width == 0 || height == 0) {
			/* NOTE(review): a zero-sized subtitle abandons the whole packet here
			   (remaining subs are dropped and add_from() is never called) rather
			   than just skipping this one sub — confirm `return' rather than
			   `continue' is intended.
			*/
			return;
		}

		dcp::Size scaled_size (width, height);
		ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), sub.rectangle));
	}

	/* Register the subtitle as active from its DCP start time; it remains active
	   until the matching subtitle_stop() arrives.
	*/
	DCPTime from(content_time_to_dcp(piece, subtitle.from()));
	_active_texts[content->type()].add_from(weak_content, ps, from);
}
1060
1061
/** Handle the start of a string (text) subtitle from a decoder: apply the
 *  content's offset/scale settings and add the result to the appropriate
 *  active-texts list.
 */
void
Player::plain_text_start (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentStringText subtitle)
{
	if (_suspended) {
		return;
	}

	auto piece = weak_piece.lock ();
	auto content = weak_content.lock ();
	if (!piece || !content) {
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	/* Ignore subtitles starting after the content has finished */
	if (from > piece->content->end(_film)) {
		return;
	}

	for (auto s: subtitle.subs) {
		/* Apply content's subtitle offsets */
		s.set_h_position (s.h_position() + content->x_offset());
		s.set_v_position (s.v_position() + content->y_offset());
		float const xs = content->x_scale();
		float const ys = content->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we scale size by 2.
		   (1 / min(1/xs, 1/ys) is max(xs, ys), i.e. the larger scale factor.)
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* Set the subtitle's own start time (millisecond-accurate dcp::Time) */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (s);
	}

	_active_texts[content->type()].add_from(weak_content, ps, from);
}
1108
1109
1110 void
1111 Player::subtitle_stop (weak_ptr<Piece> weak_piece, weak_ptr<const TextContent> weak_content, ContentTime to)
1112 {
1113         if (_suspended) {
1114                 return;
1115         }
1116
1117         auto content = weak_content.lock ();
1118         if (!content) {
1119                 return;
1120         }
1121
1122         if (!_active_texts[content->type()].have(weak_content)) {
1123                 return;
1124         }
1125
1126         auto piece = weak_piece.lock ();
1127         if (!piece) {
1128                 return;
1129         }
1130
1131         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1132
1133         if (dcp_to > piece->content->end(_film)) {
1134                 return;
1135         }
1136
1137         auto from = _active_texts[content->type()].add_to(weak_content, dcp_to);
1138
1139         bool const always = (content->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1140         if (content->use() && !always && !content->burn()) {
1141                 Text (from.first, content->type(), content->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod(from.second, dcp_to));
1142         }
1143 }
1144
1145
/** Seek the player to a DCP time.
 *
 *  @param time Time to seek to.
 *  @param accurate true to seek precisely to `time'; false to allow the seek to
 *  land on a nearby frame (e.g. the previous keyframe), which is faster.
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	/* Throw away any buffered state from before the seek */
	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	std::for_each(_active_texts.begin(), _active_texts.end(), [](ActiveText& a) { a.clear(); });

	/* Seek each piece's decoder as appropriate for where `time' falls */
	for (auto i: _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	/* For an accurate seek the clocks restart exactly at `time'; for an
	   inaccurate one they are left unset and will be re-established from the
	   first video/audio that arrives.
	*/
	if (accurate) {
		_next_video_time = time;
		_next_audio_time = time;
	} else {
		_next_video_time = boost::none;
		_next_audio_time = boost::none;
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget the last-seen video frames; they are from before the seek */
	_last_video[Eyes::LEFT] = {};
	_last_video[Eyes::RIGHT] = {};
	_last_video[Eyes::BOTH] = {};
}
1204
1205 void
1206 Player::emit_video(shared_ptr<PlayerVideo> pv, DCPTime time)
1207 {
1208         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1209                 std::for_each(_active_texts.begin(), _active_texts.end(), [time](ActiveText& a) { a.clear_before(time); });
1210         }
1211
1212         auto subtitles = open_subtitles_for_frame (time);
1213         if (subtitles) {
1214                 pv->set_text (subtitles.get ());
1215         }
1216
1217         Video (pv, time);
1218 }
1219
1220
1221 void
1222 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1223 {
1224         /* Log if the assert below is about to fail */
1225         if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) {
1226                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
1227         }
1228
1229         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1230         DCPOMATIC_ASSERT (!_next_audio_time || labs(time.get() - _next_audio_time->get()) < 2);
1231         Audio (data, time, _film->audio_frame_rate());
1232         _next_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1233 }
1234
1235
1236 void
1237 Player::fill_audio (DCPTimePeriod period)
1238 {
1239         if (period.from == period.to) {
1240                 return;
1241         }
1242
1243         DCPOMATIC_ASSERT (period.from < period.to);
1244
1245         DCPTime t = period.from;
1246         while (t < period.to) {
1247                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1248                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1249                 if (samples) {
1250                         auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1251                         silence->make_silent ();
1252                         emit_audio (silence, t);
1253                 }
1254                 t += block;
1255         }
1256 }
1257
1258
1259 DCPTime
1260 Player::one_video_frame () const
1261 {
1262         return DCPTime::from_frames (1, _film->video_frame_rate ());
1263 }
1264
1265
1266 pair<shared_ptr<AudioBuffers>, DCPTime>
1267 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1268 {
1269         auto const discard_time = discard_to - time;
1270         auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1271         auto remaining_frames = audio->frames() - discard_frames;
1272         if (remaining_frames <= 0) {
1273                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1274         }
1275         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1276         return make_pair(cut, time + discard_time);
1277 }
1278
1279
1280 void
1281 Player::set_dcp_decode_reduction (optional<int> reduction)
1282 {
1283         Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1284
1285         if (reduction == _dcp_decode_reduction.load()) {
1286                 Change(ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1287                 return;
1288         }
1289
1290         _dcp_decode_reduction = reduction;
1291         setup_pieces();
1292
1293         Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1294 }
1295
1296
1297 optional<DCPTime>
1298 Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t) const
1299 {
1300         boost::mutex::scoped_lock lm (_mutex);
1301
1302         for (auto i: _pieces) {
1303                 if (i->content == content) {
1304                         return content_time_to_dcp (i, t);
1305                 }
1306         }
1307
1308         /* We couldn't find this content; perhaps things are being changed over */
1309         return {};
1310 }
1311
1312
1313 optional<ContentTime>
1314 Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t) const
1315 {
1316         boost::mutex::scoped_lock lm (_mutex);
1317
1318         for (auto i: _pieces) {
1319                 if (i->content == content) {
1320                         return dcp_to_content_time (i, t);
1321                 }
1322         }
1323
1324         /* We couldn't find this content; perhaps things are being changed over */
1325         return {};
1326 }
1327
1328
1329 shared_ptr<const Playlist>
1330 Player::playlist () const
1331 {
1332         return _playlist ? _playlist : _film->playlist();
1333 }
1334
1335
1336 void
1337 Player::atmos (weak_ptr<Piece> weak_piece, ContentAtmos data)
1338 {
1339         if (_suspended) {
1340                 return;
1341         }
1342
1343         auto piece = weak_piece.lock ();
1344         DCPOMATIC_ASSERT (piece);
1345
1346         auto const vfr = _film->video_frame_rate();
1347
1348         DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr));
1349         if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(_film))) {
1350                 return;
1351         }
1352
1353         Atmos (data.data, dcp_time, data.metadata);
1354 }
1355