More enum class additions.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "atmos_decoder.h"
22 #include "player.h"
23 #include "film.h"
24 #include "audio_buffers.h"
25 #include "content_audio.h"
26 #include "dcp_content.h"
27 #include "dcpomatic_log.h"
28 #include "job.h"
29 #include "image.h"
30 #include "raw_image_proxy.h"
31 #include "ratio.h"
32 #include "log.h"
33 #include "render_text.h"
34 #include "config.h"
35 #include "content_video.h"
36 #include "player_video.h"
37 #include "frame_rate_change.h"
38 #include "audio_processor.h"
39 #include "playlist.h"
40 #include "referenced_reel_asset.h"
41 #include "decoder_factory.h"
42 #include "decoder.h"
43 #include "video_decoder.h"
44 #include "audio_decoder.h"
45 #include "text_content.h"
46 #include "text_decoder.h"
47 #include "ffmpeg_content.h"
48 #include "audio_content.h"
49 #include "dcp_decoder.h"
50 #include "image_decoder.h"
51 #include "compose.hpp"
52 #include "shuffler.h"
53 #include "timer.h"
54 #include <dcp/reel.h>
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <stdint.h>
60 #include <algorithm>
61 #include <iostream>
62
63 #include "i18n.h"
64
65 using std::copy;
66 using std::cout;
67 using std::dynamic_pointer_cast;
68 using std::list;
69 using std::make_pair;
70 using std::make_shared;
71 using std::map;
72 using std::max;
73 using std::min;
74 using std::min;
75 using std::pair;
76 using std::shared_ptr;
77 using std::vector;
78 using std::weak_ptr;
79 using boost::optional;
80 using boost::scoped_ptr;
81 #if BOOST_VERSION >= 106100
82 using namespace boost::placeholders;
83 #endif
84 using namespace dcpomatic;
85
86 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
87 int const PlayerProperty::PLAYLIST = 701;
88 int const PlayerProperty::FILM_CONTAINER = 702;
89 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
90 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
91 int const PlayerProperty::PLAYBACK_LENGTH = 705;
92
/** Construct a Player which plays the whole of the given film's own playlist.
 *  @param film Film to play.
 */
Player::Player (shared_ptr<const Film> film)
	: _film (film)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
101
/** Construct a Player which plays a given playlist, rather than the film's own.
 *  @param film Film that the playlist's content belongs to.
 *  @param playlist_ Playlist to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
111
/** Shared part of the two constructors: connect to film/playlist change
 *  signals, build our pieces and seek to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor, if any (see film_change()) */
	film_change (ChangeType::DONE, Film::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}
128
Player::~Player ()
{
	/* _shuffler is a raw owning pointer, created in setup_pieces_unlocked(),
	   so it must be freed here.
	*/
	delete _shuffler;
}
133
/** Thread-safe wrapper around setup_pieces_unlocked() */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
140
141
142 bool
143 have_video (shared_ptr<const Content> content)
144 {
145         return static_cast<bool>(content->video) && content->video->use();
146 }
147
148 bool
149 have_audio (shared_ptr<const Content> content)
150 {
151         return static_cast<bool>(content->audio);
152 }
153
/** Rebuild _pieces from the playlist: create (or re-use) a decoder for each
 *  piece of content and connect its output signals to our handlers.
 *  Caller must hold a lock on _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so that their decoders can be re-used below */
	auto old_pieces = _pieces;
	_pieces.clear ();

	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	for (auto i: playlist()->content()) {

		/* Skip content whose files are not all present */
		if (!i->paths_valid ()) {
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Re-use the old decoder for this content, if there was one */
		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}

		auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			for (auto i: decoder->text) {
				i->set_ignore (true);
			}
		}

		/* Tell DCP decoders whether they should decode their referenced content */
		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(i, decoder, frc);
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind(&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Connect the start/stop signals of every text decoder in this piece */
		auto j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	/* Record where each audio stream starts, so pass() can see how far each has got */
	_stream_states.clear ();
	for (auto i: _pieces) {
		if (i->content->audio) {
			for (auto j: i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Periods with no video / no audio, which pass() must fill with black / silence */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	_last_video_time = DCPTime ();
	_last_video_eyes = Eyes::BOTH;
	_last_audio_time = DCPTime ();
}
265
266 void
267 Player::playlist_content_change (ChangeType type, int property, bool frequent)
268 {
269         if (property == VideoContentProperty::CROP) {
270                 if (type == ChangeType::DONE) {
271                         dcp::Size const vcs = video_container_size();
272                         boost::mutex::scoped_lock lm (_mutex);
273                         for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
274                                 i->first->reset_metadata (_film, vcs);
275                         }
276                 }
277         } else {
278                 if (type == ChangeType::PENDING) {
279                         /* The player content is probably about to change, so we can't carry on
280                            until that has happened and we've rebuilt our pieces.  Stop pass()
281                            and seek() from working until then.
282                         */
283                         ++_suspended;
284                 } else if (type == ChangeType::DONE) {
285                         /* A change in our content has gone through.  Re-build our pieces. */
286                         setup_pieces ();
287                         --_suspended;
288                 } else if (type == ChangeType::CANCELLED) {
289                         --_suspended;
290                 }
291         }
292
293         Change (type, property, frequent);
294 }
295
/** Set the size of the container that we are putting video into, re-making
 *  our black frame image if the size really changed.
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No real change; unlock before emitting so that handlers are not
			   called with _mutex held.
			*/
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Re-make the black frame at the new size */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
318
/** Respond to a change in our playlist: once a change has gone through we
 *  must re-build our pieces, and in any case pass the change on to observers.
 */
void
Player::playlist_change (ChangeType type)
{
	if (type == ChangeType::DONE) {
		setup_pieces ();
	}
	Change (type, PlayerProperty::PLAYLIST, false);
}
327
328 void
329 Player::film_change (ChangeType type, Film::Property p)
330 {
331         /* Here we should notice Film properties that affect our output, and
332            alert listeners that our output now would be different to how it was
333            last time we were run.
334         */
335
336         if (p == Film::CONTAINER) {
337                 Change (type, PlayerProperty::FILM_CONTAINER, false);
338         } else if (p == Film::VIDEO_FRAME_RATE) {
339                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
340                    so we need new pieces here.
341                 */
342                 if (type == ChangeType::DONE) {
343                         setup_pieces ();
344                 }
345                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
346         } else if (p == Film::AUDIO_PROCESSOR) {
347                 if (type == ChangeType::DONE && _film->audio_processor ()) {
348                         boost::mutex::scoped_lock lm (_mutex);
349                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
350                 }
351         } else if (p == Film::AUDIO_CHANNELS) {
352                 if (type == ChangeType::DONE) {
353                         boost::mutex::scoped_lock lm (_mutex);
354                         _audio_merger.clear ();
355                 }
356         }
357 }
358
/** @return A PlayerVideo of our black frame, filling the whole container,
 *  for the given eye(s).
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		Crop(),
		optional<double>(),
		_video_container_size,
		_video_container_size,
		eyes,
		Part::WHOLE,
		PresetColourConversion::all().front().conversion,
		VideoRange::FULL,
		std::weak_ptr<Content>(),
		boost::optional<Frame>(),
		false
	);
}
377
/** Convert a DCP time to a video frame index within a piece of content.
 *  @param piece Piece of content.
 *  @param t Time relative to the start of the DCP.
 *  @return Frame index at the content's video rate, after accounting for trim.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
394
395 DCPTime
396 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
397 {
398         /* See comment in dcp_to_content_video */
399         auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
400         return d + piece->content->position();
401 }
402
403 Frame
404 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
405 {
406         auto s = t - piece->content->position ();
407         s = min (piece->content->length_after_trim(_film), s);
408         /* See notes in dcp_to_content_video */
409         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
410 }
411
412 DCPTime
413 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
414 {
415         /* See comment in dcp_to_content_video */
416         return DCPTime::from_frames (f, _film->audio_frame_rate())
417                 - DCPTime (piece->content->trim_start(), piece->frc)
418                 + piece->content->position();
419 }
420
421 ContentTime
422 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
423 {
424         auto s = t - piece->content->position ();
425         s = min (piece->content->length_after_trim(_film), s);
426         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
427 }
428
429 DCPTime
430 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
431 {
432         return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
433 }
434
435 vector<FontData>
436 Player::get_subtitle_fonts ()
437 {
438         boost::mutex::scoped_lock lm (_mutex);
439
440         vector<FontData> fonts;
441         for (auto i: _pieces) {
442                 /* XXX: things may go wrong if there are duplicate font IDs
443                    with different font files.
444                 */
445                 auto f = i->decoder->fonts ();
446                 copy (f.begin(), f.end(), back_inserter(fonts));
447         }
448
449         return fonts;
450 }
451
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_video = true;
	/* Re-build pieces so that the decoders are told to ignore video */
	setup_pieces_unlocked ();
}
460
/** Set this player never to produce any audio data */
void
Player::set_ignore_audio ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_audio = true;
	/* Re-build pieces so that the decoders are told to ignore audio */
	setup_pieces_unlocked ();
}
468
/** Set this player never to produce any text (subtitle/caption) data */
void
Player::set_ignore_text ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_text = true;
	/* Re-build pieces so that the text decoders are told to ignore their data */
	setup_pieces_unlocked ();
}
476
/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
	boost::mutex::scoped_lock lm (_mutex);
	/* Read by open_subtitles_for_frame(); no piece re-build is required */
	_always_burn_open_subtitles = true;
}
484
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_fast = true;
	/* _fast is passed to decoder_factory, so pieces must be re-built */
	setup_pieces_unlocked ();
}
493
/** Set the player to decode and play content which is referenced from DCPs,
 *  rather than skipping it (see setup_pieces_unlocked() and pass()).
 */
void
Player::set_play_referenced ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	setup_pieces_unlocked ();
}
501
/** Apply per-reel trims to an asset and add it to a list, unless the trims
 *  leave it with no duration at all.
 *  @param a List to add to.
 *  @param r Asset to trim and add.
 *  @param reel_trim_start Trim from the start of this reel, in frames.
 *  @param reel_trim_end Trim from the end of this reel, in frames.
 *  @param from Time at which this asset starts in the output DCP.
 *  @param ffr Film video frame rate.
 */
static void
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
{
	DCPOMATIC_ASSERT (r);
	/* Move the entry point in by the start trim, then shorten the duration by both trims */
	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
	if (r->actual_duration() > 0) {
		a.push_back (
			ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
			);
	}
}
514
/** @return Reel assets from the DCP content in our playlist which is marked
 *  to be referenced (rather than re-encoded) in the output, with per-reel
 *  trims applied.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	for (auto i: playlist()->content()) {
		/* Only DCP content can be referenced */
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* If the DCP can't be read, give up and return what we have so far */
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		for (auto k: decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		for (auto k: decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::CLOSED_CAPTION)) {
				for (auto l: k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
585
/** Run one pass of the player: make whichever of our decoders (or black/silence
 *  fillers) is furthest behind emit some data, then emit any audio which is
 *  complete up to the earliest point any stream has reached.
 *  @return true if there is nothing more to do (playback has finished); false
 *  if pass() should be called again (including when we are suspended).
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	for (auto i: _pieces) {
		if (i->done) {
			continue;
		}

		/* Where this piece's decoder has got to, in DCP time */
		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What we will do on this pass */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* A black or silent gap beats the content if it starts earlier */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
		earliest_content->done = earliest_content->decoder->pass ();
		shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _playback_length;
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* We've finished: flush out anything still held back (3D shuffling, delayed frames) */
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
744
/** @return Open subtitles for the frame at the given time, converted to images,
 *  merged into a single positioned image; or an empty optional if there are none.
 *  @param time Time of the frame, relative to the start of the DCP.
 */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	/* Consider every active open subtitle which should be burnt in over the
	   one-frame period starting at `time'.
	*/
	for (
		auto j:
		_active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		for (auto i: j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint (_video_container_size.width * i.rectangle.x),
						lrint (_video_container_size.height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty ()) {
			list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty ()) {
		return optional<PositionImage> ();
	}

	return merge (captions);
}
790
/** Handle a video frame coming from a decoder.  Any gap between the last video
 *  that was emitted and this frame is filled (using a repeat of the previous
 *  frame for this piece, or black), then this frame is emitted, repeated if the
 *  frame rate change requires it.
 *  @param wp Piece that the video is coming from.
 *  @param video Video frame and its frame index within the content.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
        shared_ptr<Piece> piece = wp.lock ();
        if (!piece) {
                /* The piece that this video came from has gone away */
                return;
        }

        if (!piece->content->video->use()) {
                /* Video from this content is disabled */
                return;
        }

        /* NOTE(review): frc.skip appears to mean "drop every other frame" to halve
           the content rate — confirm against FrameRateChange.
        */
        FrameRateChange frc (_film, piece->content);
        if (frc.skip && (video.frame % 2) == 1) {
                return;
        }

        /* Time of the first frame we will emit */
        DCPTime const time = content_video_to_dcp (piece, video.frame);
        LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

        /* Discard if it's before the content's period or the last accurate seek.  We can't discard
           if it's after the content's period here as in that case we still need to fill any gap between
           `now' and the end of the content's period.
        */
        if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
                return;
        }

        /* Fill gaps that we discover now that we have some video which needs to be emitted.
           This is where we need to fill to.
        */
        DCPTime fill_to = min (time, piece->content->end(_film));

        if (_last_video_time) {
                DCPTime fill_from = max (*_last_video_time, piece->content->position());

                /* Fill if we have more than half a frame to do */
                if ((fill_to - fill_from) > one_video_frame() / 2) {
                        /* The last frame we emitted for this piece, if any; used to fill the gap */
                        LastVideoMap::const_iterator last = _last_video.find (wp);
                        if (_film->three_d()) {
                                /* In 3D we must fill eye-by-eye, stopping at the eye before this frame's */
                                Eyes fill_to_eyes = video.eyes;
                                if (fill_to_eyes == Eyes::BOTH) {
                                        fill_to_eyes = Eyes::LEFT;
                                }
                                if (fill_to == piece->content->end(_film)) {
                                        /* Don't fill after the end of the content */
                                        fill_to_eyes = Eyes::LEFT;
                                }
                                DCPTime j = fill_from;
                                Eyes eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
                                if (eyes == Eyes::BOTH) {
                                        eyes = Eyes::LEFT;
                                }
                                /* Step through eyes; time only advances once the RIGHT eye has been emitted */
                                while (j < fill_to || eyes != fill_to_eyes) {
                                        if (last != _last_video.end()) {
                                                LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
                                                shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
                                                copy->set_eyes (eyes);
                                                emit_video (copy, j);
                                        } else {
                                                LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
                                                emit_video (black_player_video_frame(eyes), j);
                                        }
                                        if (eyes == Eyes::RIGHT) {
                                                j += one_video_frame();
                                        }
                                        eyes = increment_eyes (eyes);
                                }
                        } else {
                                /* 2D: fill one whole frame at a time */
                                for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
                                        if (last != _last_video.end()) {
                                                emit_video (last->second, j);
                                        } else {
                                                emit_video (black_player_video_frame(Eyes::BOTH), j);
                                        }
                                }
                        }
                }
        }

        /* Remember this frame so that it can be used to fill any future gap for this piece */
        _last_video[wp].reset (
                new PlayerVideo (
                        video.image,
                        piece->content->video->crop (),
                        piece->content->video->fade (_film, video.frame),
                        scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
                        _video_container_size,
                        video.eyes,
                        video.part,
                        piece->content->video->colour_conversion(),
                        piece->content->video->range(),
                        piece->content,
                        video.frame,
                        false
                        )
                );

        /* Emit the frame, repeated as many times as the frame rate change requires,
           but never past the end of the content.
        */
        DCPTime t = time;
        for (int i = 0; i < frc.repeat; ++i) {
                if (t < piece->content->end(_film)) {
                        emit_video (_last_video[wp], t);
                }
                t += one_video_frame ();
        }
}
897
/** Handle some audio data coming from a decoder: trim it to the content's period,
 *  apply gain, remap it to the film's channel layout, run any audio processor and
 *  push the result to the audio merger.
 *  @param wp Piece that the audio is coming from.
 *  @param stream Stream within the content that the audio belongs to.
 *  @param content_audio Audio data and its frame number within the content.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
        DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

        shared_ptr<Piece> piece = wp.lock ();
        if (!piece) {
                /* The piece that this audio came from has gone away */
                return;
        }

        shared_ptr<AudioContent> content = piece->content->audio;
        DCPOMATIC_ASSERT (content);

        /* Frame rate of this content after resampling to suit the film */
        int const rfr = content->resampled_frame_rate (_film);

        /* Compute time in the DCP */
        DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
        LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

        /* And the end of this block in the DCP */
        DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

        /* Remove anything that comes before the start or after the end of the content */
        if (time < piece->content->position()) {
                pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
                if (!cut.first) {
                        /* This audio is entirely discarded */
                        return;
                }
                content_audio.audio = cut.first;
                time = cut.second;
        } else if (time > piece->content->end(_film)) {
                /* Discard it all */
                return;
        } else if (end > piece->content->end(_film)) {
                /* Straddles the end of the content: keep only the part that is inside */
                Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
                if (remaining_frames == 0) {
                        return;
                }
                content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
        }

        DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

        /* Gain */

        if (content->gain() != 0) {
                /* Copy the buffers first so that the decoder's data is not modified */
                shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
                gain->apply_gain (content->gain ());
                content_audio.audio = gain;
        }

        /* Remap to the film's channel layout */

        content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

        /* Process */

        if (_audio_processor) {
                content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
        }

        /* Push to the merger, and note how far this stream has got */

        _audio_merger.push (content_audio.audio, time);
        DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
        _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
966
967 void
968 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
969 {
970         shared_ptr<Piece> piece = wp.lock ();
971         shared_ptr<const TextContent> text = wc.lock ();
972         if (!piece || !text) {
973                 return;
974         }
975
976         /* Apply content's subtitle offsets */
977         subtitle.sub.rectangle.x += text->x_offset ();
978         subtitle.sub.rectangle.y += text->y_offset ();
979
980         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
981         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
982         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
983
984         /* Apply content's subtitle scale */
985         subtitle.sub.rectangle.width *= text->x_scale ();
986         subtitle.sub.rectangle.height *= text->y_scale ();
987
988         PlayerText ps;
989         shared_ptr<Image> image = subtitle.sub.image;
990
991         /* We will scale the subtitle up to fit _video_container_size */
992         int const width = subtitle.sub.rectangle.width * _video_container_size.width;
993         int const height = subtitle.sub.rectangle.height * _video_container_size.height;
994         if (width == 0 || height == 0) {
995                 return;
996         }
997
998         dcp::Size scaled_size (width, height);
999         ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
1000         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
1001
1002         _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
1003 }
1004
/** Handle a string (plain-text) subtitle becoming active: apply the content's
 *  position/scale settings to each string and record the result in _active_texts.
 *  @param wp Piece that the subtitle is from.
 *  @param wc Text content within the piece.
 *  @param subtitle Subtitle strings and their time within the content.
 */
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
        shared_ptr<Piece> piece = wp.lock ();
        shared_ptr<const TextContent> text = wc.lock ();
        if (!piece || !text) {
                /* The content that this subtitle came from has gone away */
                return;
        }

        PlayerText ps;
        DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

        if (from > piece->content->end(_film)) {
                /* The subtitle starts after the end of its content, so ignore it */
                return;
        }

        for (auto s: subtitle.subs) {
                s.set_h_position (s.h_position() + text->x_offset ());
                s.set_v_position (s.v_position() + text->y_offset ());
                float const xs = text->x_scale();
                float const ys = text->y_scale();
                float size = s.size();

                /* Adjust size to express the common part of the scaling;
                   the factor applied is max(xs, ys) (e.g. if xs = ys = 0.5
                   the size is halved), leaving any residual aspect-ratio
                   difference to be expressed below.
                */
                if (xs > 1e-5 && ys > 1e-5) {
                        size *= 1 / min (1 / xs, 1 / ys);
                }
                s.set_size (size);

                /* Then express aspect ratio changes */
                if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
                        s.set_aspect_adjust (xs / ys);
                }

                /* Subtitle `in' time, expressed with millisecond resolution */
                s.set_in (dcp::Time(from.seconds(), 1000));
                ps.string.push_back (StringText (s, text->outline_width()));
                ps.add_fonts (text->fonts ());
        }

        _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
}
1048
1049 void
1050 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1051 {
1052         shared_ptr<const TextContent> text = wc.lock ();
1053         if (!text) {
1054                 return;
1055         }
1056
1057         if (!_active_texts[static_cast<int>(text->type())].have(wc)) {
1058                 return;
1059         }
1060
1061         shared_ptr<Piece> piece = wp.lock ();
1062         if (!piece) {
1063                 return;
1064         }
1065
1066         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1067
1068         if (dcp_to > piece->content->end(_film)) {
1069                 return;
1070         }
1071
1072         pair<PlayerText, DCPTime> from = _active_texts[static_cast<int>(text->type())].add_to (wc, dcp_to);
1073
1074         bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1075         if (text->use() && !always && !text->burn()) {
1076                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1077         }
1078 }
1079
/** Seek the player to a time in the DCP.
 *  @param time Time to seek to.
 *  @param accurate true to seek so that the next emission is exactly at `time';
 *  false if it is acceptable to end up at some nearby time.
 */
void
Player::seek (DCPTime time, bool accurate)
{
        boost::mutex::scoped_lock lm (_mutex);
        LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

        if (_suspended) {
                /* We can't seek in this state */
                return;
        }

        /* Throw away all buffered/partial state from before the seek */

        if (_shuffler) {
                _shuffler->clear ();
        }

        _delay.clear ();

        if (_audio_processor) {
                _audio_processor->flush ();
        }

        _audio_merger.clear ();
        for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
                _active_texts[i].clear ();
        }

        for (auto i: _pieces) {
                if (time < i->content->position()) {
                        /* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
                           we must seek this (following) content accurately, otherwise when we come to the end of the current
                           content we may not start right at the beginning of the next, causing a gap (if the next content has
                           been trimmed to a point between keyframes, or something).
                        */
                        i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
                        i->done = false;
                } else if (i->content->position() <= time && time < i->content->end(_film)) {
                        /* During; seek to position */
                        i->decoder->seek (dcp_to_content_time (i, time), accurate);
                        i->done = false;
                } else {
                        /* After; this piece is done */
                        i->done = true;
                }
        }

        if (accurate) {
                /* An accurate seek means the next video/audio emitted will be exactly at `time' */
                _last_video_time = time;
                _last_video_eyes = Eyes::LEFT;
                _last_audio_time = time;
        } else {
                /* Inaccurate seek: we don't know exactly where we will resume from */
                _last_video_time = optional<DCPTime>();
                _last_video_eyes = optional<Eyes>();
                _last_audio_time = optional<DCPTime>();
        }

        _black.set_position (time);
        _silent.set_position (time);

        /* Forget remembered frames; they are no longer valid for gap-filling */
        _last_video.clear ();
}
1140
1141 void
1142 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1143 {
1144         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1145            player before the video that requires them.
1146         */
1147         _delay.push_back (make_pair (pv, time));
1148
1149         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1150                 _last_video_time = time + one_video_frame();
1151         }
1152         _last_video_eyes = increment_eyes (pv->eyes());
1153
1154         if (_delay.size() < 3) {
1155                 return;
1156         }
1157
1158         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1159         _delay.pop_front();
1160         do_emit_video (to_do.first, to_do.second);
1161 }
1162
1163 void
1164 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1165 {
1166         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1167                 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1168                         _active_texts[i].clear_before (time);
1169                 }
1170         }
1171
1172         auto subtitles = open_subtitles_for_frame (time);
1173         if (subtitles) {
1174                 pv->set_text (subtitles.get ());
1175         }
1176
1177         Video (pv, time);
1178 }
1179
1180 void
1181 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1182 {
1183         /* Log if the assert below is about to fail */
1184         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1185                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1186         }
1187
1188         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1189         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1190         Audio (data, time, _film->audio_frame_rate());
1191         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1192 }
1193
1194 void
1195 Player::fill_audio (DCPTimePeriod period)
1196 {
1197         if (period.from == period.to) {
1198                 return;
1199         }
1200
1201         DCPOMATIC_ASSERT (period.from < period.to);
1202
1203         DCPTime t = period.from;
1204         while (t < period.to) {
1205                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1206                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1207                 if (samples) {
1208                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1209                         silence->make_silent ();
1210                         emit_audio (silence, t);
1211                 }
1212                 t += block;
1213         }
1214 }
1215
/** @return The duration of one video frame at the film's video frame rate */
DCPTime
Player::one_video_frame () const
{
        return DCPTime::from_frames (1, _film->video_frame_rate ());
}
1221
1222 pair<shared_ptr<AudioBuffers>, DCPTime>
1223 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1224 {
1225         DCPTime const discard_time = discard_to - time;
1226         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1227         Frame remaining_frames = audio->frames() - discard_frames;
1228         if (remaining_frames <= 0) {
1229                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1230         }
1231         shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1232         return make_pair(cut, time + discard_time);
1233 }
1234
/** Set the resolution reduction to use when decoding DCP content, notifying
 *  observers with PENDING / DONE (or CANCELLED if the value is unchanged).
 *  @param reduction New reduction, or an empty optional for no reduction.
 */
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
        Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

        {
                boost::mutex::scoped_lock lm (_mutex);

                if (reduction == _dcp_decode_reduction) {
                        /* No change: unlock before emitting so slots may call back
                           into the Player (NOTE(review): presumably — confirm)
                        */
                        lm.unlock ();
                        Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
                        return;
                }

                _dcp_decode_reduction = reduction;
                /* Rebuild the pieces so that decoders pick up the new reduction */
                setup_pieces_unlocked ();
        }

        /* Emitted after the lock is released, as above */
        Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1255
1256 optional<DCPTime>
1257 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1258 {
1259         boost::mutex::scoped_lock lm (_mutex);
1260
1261         for (auto i: _pieces) {
1262                 if (i->content == content) {
1263                         return content_time_to_dcp (i, t);
1264                 }
1265         }
1266
1267         /* We couldn't find this content; perhaps things are being changed over */
1268         return {};
1269 }
1270
1271
1272 shared_ptr<const Playlist>
1273 Player::playlist () const
1274 {
1275         return _playlist ? _playlist : _film->playlist();
1276 }
1277
1278
1279 void
1280 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1281 {
1282         Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
1283 }
1284