C++11 cleanups.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "player.h"
24 #include "film.h"
25 #include "audio_buffers.h"
26 #include "content_audio.h"
27 #include "dcp_content.h"
28 #include "dcpomatic_log.h"
29 #include "job.h"
30 #include "image.h"
31 #include "raw_image_proxy.h"
32 #include "ratio.h"
33 #include "log.h"
34 #include "render_text.h"
35 #include "config.h"
36 #include "content_video.h"
37 #include "player_video.h"
38 #include "frame_rate_change.h"
39 #include "audio_processor.h"
40 #include "playlist.h"
41 #include "referenced_reel_asset.h"
42 #include "decoder_factory.h"
43 #include "decoder.h"
44 #include "video_decoder.h"
45 #include "audio_decoder.h"
46 #include "text_content.h"
47 #include "text_decoder.h"
48 #include "ffmpeg_content.h"
49 #include "audio_content.h"
50 #include "dcp_decoder.h"
51 #include "image_decoder.h"
52 #include "compose.hpp"
53 #include "shuffler.h"
54 #include "timer.h"
55 #include <dcp/reel.h>
56 #include <dcp/reel_sound_asset.h>
57 #include <dcp/reel_subtitle_asset.h>
58 #include <dcp/reel_picture_asset.h>
59 #include <dcp/reel_closed_caption_asset.h>
60 #include <stdint.h>
61 #include <algorithm>
62 #include <iostream>
63
64 #include "i18n.h"
65
66
67 using std::copy;
68 using std::cout;
69 using std::dynamic_pointer_cast;
70 using std::list;
71 using std::make_pair;
72 using std::make_shared;
73 using std::map;
74 using std::max;
75 using std::min;
76 using std::min;
77 using std::pair;
78 using std::shared_ptr;
79 using std::vector;
80 using std::weak_ptr;
81 using std::make_shared;
82 using boost::optional;
83 using boost::scoped_ptr;
84 #if BOOST_VERSION >= 106100
85 using namespace boost::placeholders;
86 #endif
87 using namespace dcpomatic;
88
89
/* Codes identifying which aspect of the Player changed when the Change signal
   is emitted; presumably numbered from 700 to keep them distinct from other
   property codes used elsewhere in the project — TODO confirm against
   Film/content property numbering. */
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
96
97
98 Player::Player (shared_ptr<const Film> film)
99         : _film (film)
100         , _suspended (0)
101         , _tolerant (film->tolerant())
102         , _audio_merger (_film->audio_frame_rate())
103 {
104         construct ();
105 }
106
107
108 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
109         : _film (film)
110         , _playlist (playlist_)
111         , _suspended (0)
112         , _tolerant (film->tolerant())
113         , _audio_merger (_film->audio_frame_rate())
114 {
115         construct ();
116 }
117
118
/** Shared part of both constructors: wire up signals, set up initial state
 *  and seek to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up any audio processor the film already has configured */
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	/* Start from the beginning with an accurate seek */
	seek (DCPTime (), true);
}
135
136
Player::~Player ()
{
	/* _shuffler is a raw owning pointer allocated in setup_pieces_unlocked().
	   NOTE(review): could be a std::unique_ptr — confirm against the header. */
	delete _shuffler;
}
141
142
/** Thread-safe wrapper: take the state mutex and rebuild our list of Pieces */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
149
150
151 bool
152 have_video (shared_ptr<const Content> content)
153 {
154         return static_cast<bool>(content->video) && content->video->use();
155 }
156
157
158 bool
159 have_audio (shared_ptr<const Content> content)
160 {
161         return static_cast<bool>(content->audio);
162 }
163
164
165 void
166 Player::setup_pieces_unlocked ()
167 {
168         _playback_length = _playlist ? _playlist->length(_film) : _film->length();
169
170         auto old_pieces = _pieces;
171         _pieces.clear ();
172
173         delete _shuffler;
174         _shuffler = new Shuffler();
175         _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
176
177         for (auto i: playlist()->content()) {
178
179                 if (!i->paths_valid ()) {
180                         continue;
181                 }
182
183                 if (_ignore_video && _ignore_audio && i->text.empty()) {
184                         /* We're only interested in text and this content has none */
185                         continue;
186                 }
187
188                 shared_ptr<Decoder> old_decoder;
189                 for (auto j: old_pieces) {
190                         if (j->content == i) {
191                                 old_decoder = j->decoder;
192                                 break;
193                         }
194                 }
195
196                 auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
197                 DCPOMATIC_ASSERT (decoder);
198
199                 FrameRateChange frc (_film, i);
200
201                 if (decoder->video && _ignore_video) {
202                         decoder->video->set_ignore (true);
203                 }
204
205                 if (decoder->audio && _ignore_audio) {
206                         decoder->audio->set_ignore (true);
207                 }
208
209                 if (_ignore_text) {
210                         for (auto i: decoder->text) {
211                                 i->set_ignore (true);
212                         }
213                 }
214
215                 auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
216                 if (dcp) {
217                         dcp->set_decode_referenced (_play_referenced);
218                         if (_play_referenced) {
219                                 dcp->set_forced_reduction (_dcp_decode_reduction);
220                         }
221                 }
222
223                 auto piece = make_shared<Piece>(i, decoder, frc);
224                 _pieces.push_back (piece);
225
226                 if (decoder->video) {
227                         if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
228                                 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
229                                 decoder->video->Data.connect (bind(&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
230                         } else {
231                                 decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
232                         }
233                 }
234
235                 if (decoder->audio) {
236                         decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
237                 }
238
239                 auto j = decoder->text.begin();
240
241                 while (j != decoder->text.end()) {
242                         (*j)->BitmapStart.connect (
243                                 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
244                                 );
245                         (*j)->PlainStart.connect (
246                                 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
247                                 );
248                         (*j)->Stop.connect (
249                                 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
250                                 );
251
252                         ++j;
253                 }
254
255                 if (decoder->atmos) {
256                         decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
257                 }
258         }
259
260         _stream_states.clear ();
261         for (auto i: _pieces) {
262                 if (i->content->audio) {
263                         for (auto j: i->content->audio->streams()) {
264                                 _stream_states[j] = StreamState (i, i->content->position ());
265                         }
266                 }
267         }
268
269         _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
270         _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
271
272         _last_video_time = {};
273         _last_video_eyes = Eyes::BOTH;
274         _last_audio_time = {};
275 }
276
277
/** Respond to a change in a piece of our playlist's content.
 *  @param type Stage of the change (PENDING, DONE or CANCELLED).
 *  @param property Code of the property which changed.
 *  @param frequent Passed through to the Change signal we re-emit.
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (property == VideoContentProperty::CROP) {
		if (type == ChangeType::DONE) {
			/* For a crop change we just refresh the metadata of any video
			   frames waiting in the delay queue, rather than rebuilding pieces. */
			auto const vcs = video_container_size();
			boost::mutex::scoped_lock lm (_mutex);
			for (auto const& i: _delay) {
				i.first->reset_metadata (_film, vcs);
			}
		}
	} else {
		if (type == ChangeType::PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces.  Stop pass()
			   and seek() from working until then.
			*/
			++_suspended;
		} else if (type == ChangeType::DONE) {
			/* A change in our content has gone through.  Re-build our pieces. */
			setup_pieces ();
			--_suspended;
		} else if (type == ChangeType::CANCELLED) {
			--_suspended;
		}
	}

	/* Let our own observers know too */
	Change (type, property, frequent);
}
307
308
/** Set the size of the container that output video will be scaled into.
 *  Emits PENDING, then either DONE or — if the size is unchanged — CANCELLED.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No real change; release the lock before signalling the cancellation */
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Pre-render a black frame at the new size, used to fill video gaps */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
331
332
333 void
334 Player::playlist_change (ChangeType type)
335 {
336         if (type == ChangeType::DONE) {
337                 setup_pieces ();
338         }
339         Change (type, PlayerProperty::PLAYLIST, false);
340 }
341
342
343 void
344 Player::film_change (ChangeType type, Film::Property p)
345 {
346         /* Here we should notice Film properties that affect our output, and
347            alert listeners that our output now would be different to how it was
348            last time we were run.
349         */
350
351         if (p == Film::Property::CONTAINER) {
352                 Change (type, PlayerProperty::FILM_CONTAINER, false);
353         } else if (p == Film::Property::VIDEO_FRAME_RATE) {
354                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
355                    so we need new pieces here.
356                 */
357                 if (type == ChangeType::DONE) {
358                         setup_pieces ();
359                 }
360                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
361         } else if (p == Film::Property::AUDIO_PROCESSOR) {
362                 if (type == ChangeType::DONE && _film->audio_processor ()) {
363                         boost::mutex::scoped_lock lm (_mutex);
364                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
365                 }
366         } else if (p == Film::Property::AUDIO_CHANNELS) {
367                 if (type == ChangeType::DONE) {
368                         boost::mutex::scoped_lock lm (_mutex);
369                         _audio_merger.clear ();
370                 }
371         }
372 }
373
374
/** @return a completely black PlayerVideo frame, sized to the current video
 *  container, carrying the given eye(s).
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		Crop(),
		optional<double>(),
		_video_container_size,
		_video_container_size,
		eyes,
		Part::WHOLE,
		PresetColourConversion::all().front().conversion,
		VideoRange::FULL,
		std::weak_ptr<Content>(),
		boost::optional<Frame>(),
		false
	);
}
393
394
/** @return the video frame index within @p piece's content which corresponds
 *  to DCP time @p t, clamped to the content's trimmed extent.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
411
412
413 DCPTime
414 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
415 {
416         /* See comment in dcp_to_content_video */
417         auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
418         return d + piece->content->position();
419 }
420
421
/** @return the audio frame index (at the film's audio rate) within @p piece
 *  which corresponds to DCP time @p t, clamped to the content's trimmed extent.
 */
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
}
430
431
432 DCPTime
433 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
434 {
435         /* See comment in dcp_to_content_video */
436         return DCPTime::from_frames (f, _film->audio_frame_rate())
437                 - DCPTime (piece->content->trim_start(), piece->frc)
438                 + piece->content->position();
439 }
440
441
/** @return the ContentTime within @p piece corresponding to DCP time @p t,
 *  clamped below at zero and above at the content's trimmed length.
 */
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
	auto s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
449
450
451 DCPTime
452 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
453 {
454         return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
455 }
456
457
458 vector<FontData>
459 Player::get_subtitle_fonts ()
460 {
461         boost::mutex::scoped_lock lm (_mutex);
462
463         vector<FontData> fonts;
464         for (auto i: _pieces) {
465                 /* XXX: things may go wrong if there are duplicate font IDs
466                    with different font files.
467                 */
468                 auto f = i->decoder->fonts ();
469                 copy (f.begin(), f.end(), back_inserter(fonts));
470         }
471
472         return fonts;
473 }
474
475
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_video = true;
	/* The flag is read during piece setup, so rebuild */
	setup_pieces_unlocked ();
}
484
485
/** Set this player never to produce any audio data */
void
Player::set_ignore_audio ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_audio = true;
	/* The flag is read during piece setup, so rebuild */
	setup_pieces_unlocked ();
}
493
494
/** Set this player never to produce any text (subtitle/caption) data */
void
Player::set_ignore_text ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_text = true;
	/* The flag is read during piece setup, so rebuild */
	setup_pieces_unlocked ();
}
502
503
/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
	boost::mutex::scoped_lock lm (_mutex);
	/* No rebuild needed: the flag is consulted when rendering subtitles */
	_always_burn_open_subtitles = true;
}
511
512
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_fast = true;
	/* The flag is passed to decoder_factory during piece setup, so rebuild */
	setup_pieces_unlocked ();
}
521
522
/** Set the player to decode and play content from referenced DCPs, rather
 *  than skipping it on the assumption that the referenced assets will be used.
 */
void
Player::set_play_referenced ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	/* The flag affects DCP decoder configuration, so rebuild */
	setup_pieces_unlocked ();
}
530
531
/** Trim a reel asset and, if anything remains after trimming, add it to a list.
 *  @param a List to add to.
 *  @param r Asset; must be non-null (asserted).
 *  @param reel_trim_start Frames to trim from the start of this reel.
 *  @param reel_trim_end Frames to trim from the end of this reel.
 *  @param from Time at which the asset starts in the DCP.
 *  @param ffr Film frame rate, used to convert the trimmed duration to DCPTime.
 */
static void
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
{
	DCPOMATIC_ASSERT (r);
	/* Apply the trims by moving the asset's entry point and shrinking its duration */
	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
	if (r->actual_duration() > 0) {
		a.push_back (
			ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
			);
	}
}
544
545
/** @return details of the reel assets in any referenced DCP content, positioned
 *  and trimmed according to this film's timeline.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	for (auto i: playlist()->content()) {
		/* Only DCP content can be referenced */
		auto j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder(_film, j, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* NOTE(review): failure to examine one DCP abandons the whole scan
			   and returns what we have so far — confirm this is intended. */
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		/* Trims expressed in frames at the content's rate */
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		for (auto k: decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		for (auto k: decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			/* Where this reel starts in the film's timeline */
			auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::CLOSED_CAPTION)) {
				for (auto l: k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
616
617
/** Make the player emit some more of its output (video, audio, texts).
 *  @return true if playback has finished (or cannot proceed further),
 *  false if there may be more to come.
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	for (auto i: _pieces) {
		if (i->done) {
			continue;
		}

		auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What kind of thing should emit data next: real content, gap-filling
	   black video, gap-filling silence, or nothing (we are finished). */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
		earliest_content->done = earliest_content->decoder->pass ();
		auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	auto pull_to = _playback_length;
	for (auto const& i: _stream_states) {
		if (!i.second.piece->done && i.second.last_push_end < pull_to) {
			pull_to = i.second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	auto audio = _audio_merger.pull (pull_to);
	for (auto i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			auto cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Playback is complete: flush out anything held back in the shuffler
		   and the delay queue */
		_shuffler->flush ();
		for (auto const& i: _delay) {
			do_emit_video(i.first, i.second);
		}
	}

	return done;
}
776
777
/** @return Open subtitles for the frame at the given time, converted to images */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	/* Consider every open subtitle active during the one-frame period starting at `time' */
	for (
		auto j:
		_active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		for (auto i: j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

			/* Convert the proportional rectangle position into pixels */
			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint(_video_container_size.width * i.rectangle.x),
						lrint(_video_container_size.height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty()) {
			auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty()) {
		return {};
	}

	/* Combine all captions into a single image */
	return merge (captions);
}
823
824
/** Handle a video frame arriving from a decoder.
 *
 *  The frame is discarded if it is before the content's period or before the
 *  last accurate seek position.  Otherwise any gap between the last video we
 *  emitted and this frame is filled (with repeats of the previous frame if we
 *  have one, else with black), then the frame itself is emitted, repeated as
 *  required by the content-to-DCP frame rate change.
 *
 *  @param wp Piece that this video arrived from.
 *  @param video Video frame and its index within the content.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	auto piece = wp.lock ();
	if (!piece) {
		return;
	}

	if (!piece->content->video->use()) {
		return;
	}

	FrameRateChange frc (_film, piece->content);
	/* When skipping (content rate is double the DCP rate) drop every other frame */
	if (frc.skip && (video.frame % 2) == 1) {
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* Last frame we emitted for this piece, if any; used as the fill source */
			auto last = _last_video.find (wp);
			if (_film->three_d()) {
				/* In 3D we fill eye-by-eye, stopping just before the eye of the
				   incoming frame (or LEFT if the incoming frame covers both eyes).
				*/
				auto fill_to_eyes = video.eyes;
				if (fill_to_eyes == Eyes::BOTH) {
					fill_to_eyes = Eyes::LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = Eyes::LEFT;
				}
				auto j = fill_from;
				/* Eye to start filling from; a BOTH-eyes last frame means we start at LEFT */
				auto eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
				if (eyes == Eyes::BOTH) {
					eyes = Eyes::LEFT;
				}
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						auto copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					/* Only advance time once both eyes of a frame have been emitted */
					if (eyes == Eyes::RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: fill whole frames from fill_from up to (but not including) fill_to */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(Eyes::BOTH), j);
					}
				}
			}
		}
	}

	/* Remember this frame (per piece) so that it can be used for later gap-filling */
	_last_video[wp] = std::make_shared<PlayerVideo>(
		video.image,
		piece->content->video->crop (),
		piece->content->video->fade (_film, video.frame),
		scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
		_video_container_size,
		video.eyes,
		video.part,
		piece->content->video->colour_conversion(),
		piece->content->video->range(),
		piece->content,
		video.frame,
		false
		);

	/* Emit the frame, repeating it as the frame rate change requires, but never
	   past the end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
929
930
/** Handle a block of audio arriving from a decoder.
 *
 *  The block is trimmed to the content's period, has the content's gain applied,
 *  is remapped to the film's channel layout, passed through any audio processor
 *  and finally pushed into the audio merger.
 *
 *  @param wp Piece that this audio arrived from.
 *  @param stream Stream within the piece that the audio belongs to.
 *  @param content_audio Audio data and its frame position within the content.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	auto piece = wp.lock ();
	if (!piece) {
		return;
	}

	auto content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Frame rate of this content after resampling to suit the film */
	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	auto time = resampled_audio_to_dcp (piece, content_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		auto cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* The block overlaps the end of the content; keep only the part inside */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy the buffers before applying gain so that the decoder's data is untouched */
		auto gain = make_shared<AudioBuffers>(content_audio.audio);
		gain->apply_gain (content->gain());
		content_audio.audio = gain;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
999
1000
/** Handle the start of a bitmap subtitle/caption from a decoder: apply the
 *  content's offsets and scales to its rectangle, scale the image to suit
 *  _video_container_size and add it to the appropriate active-texts list.
 *  @param wp Piece the subtitle arrived from.
 *  @param wc Text content the subtitle belongs to.
 *  @param subtitle The bitmap subtitle, with its rectangle in proportional (0..1) units.
 */
void
Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
{
	auto piece = wp.lock ();
	auto text = wc.lock ();
	if (!piece || !text) {
		return;
	}

	/* Apply content's subtitle offsets */
	subtitle.sub.rectangle.x += text->x_offset ();
	subtitle.sub.rectangle.y += text->y_offset ();

	/* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
	subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
	subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);

	/* Apply content's subtitle scale */
	subtitle.sub.rectangle.width *= text->x_scale ();
	subtitle.sub.rectangle.height *= text->y_scale ();

	PlayerText ps;
	auto image = subtitle.sub.image;

	/* We will scale the subtitle up to fit _video_container_size */
	int const width = subtitle.sub.rectangle.width * _video_container_size.width;
	int const height = subtitle.sub.rectangle.height * _video_container_size.height;
	/* A zero-sized subtitle (e.g. after scaling) has nothing to show */
	if (width == 0 || height == 0) {
		return;
	}

	dcp::Size scaled_size (width, height);
	ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
	DCPTime from (content_time_to_dcp (piece, subtitle.from()));

	_active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
}
1038
1039
/** Handle the start of a string (plain-text) subtitle/caption from a decoder:
 *  apply the content's position offsets, size and aspect scaling, then add the
 *  result to the appropriate active-texts list.
 *  @param wp Piece the subtitle arrived from.
 *  @param wc Text content the subtitle belongs to.
 *  @param subtitle The string subtitle(s).
 */
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	auto piece = wp.lock ();
	auto text = wc.lock ();
	if (!piece || !text) {
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	if (from > piece->content->end(_film)) {
		return;
	}

	for (auto s: subtitle.subs) {
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling: this multiplies
		   size by max(xs, ys), e.g. if xs = ys = 0.5 the size is halved.  The
		   residual difference between xs and ys is expressed via aspect_adjust below.
		   NOTE(review): an earlier comment here claimed this scales size by 2 when
		   xs = ys = 0.5, which does not match the code — confirm intended behaviour.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* Time the subtitle appears, as a dcp::Time with 1000 ticks per second */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
}
1083
1084
1085 void
1086 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1087 {
1088         auto text = wc.lock ();
1089         if (!text) {
1090                 return;
1091         }
1092
1093         if (!_active_texts[static_cast<int>(text->type())].have(wc)) {
1094                 return;
1095         }
1096
1097         shared_ptr<Piece> piece = wp.lock ();
1098         if (!piece) {
1099                 return;
1100         }
1101
1102         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1103
1104         if (dcp_to > piece->content->end(_film)) {
1105                 return;
1106         }
1107
1108         auto from = _active_texts[static_cast<int>(text->type())].add_to (wc, dcp_to);
1109
1110         bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1111         if (text->use() && !always && !text->burn()) {
1112                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1113         }
1114 }
1115
1116
/** Seek the player to a new position.
 *  @param time DCP time to seek to.
 *  @param accurate true to seek exactly to @p time; false to allow decoders to
 *  start from a convenient earlier point (e.g. a keyframe), which is faster.
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	/* Throw away all buffered/pending state from before the seek */
	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
		_active_texts[i].clear ();
	}

	for (auto i: _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* We know exactly where the next video/audio will come from */
		_last_video_time = time;
		_last_video_eyes = Eyes::LEFT;
		_last_audio_time = time;
	} else {
		/* Inaccurate seek: we don't know where the decoders will resume, so clear
		   these and let the first emitted data re-establish them.
		*/
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget frames cached for gap-filling; they are from before the seek */
	_last_video.clear ();
}
1177
1178
1179 void
1180 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1181 {
1182         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1183            player before the video that requires them.
1184         */
1185         _delay.push_back (make_pair (pv, time));
1186
1187         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1188                 _last_video_time = time + one_video_frame();
1189         }
1190         _last_video_eyes = increment_eyes (pv->eyes());
1191
1192         if (_delay.size() < 3) {
1193                 return;
1194         }
1195
1196         auto to_do = _delay.front();
1197         _delay.pop_front();
1198         do_emit_video (to_do.first, to_do.second);
1199 }
1200
1201
1202 void
1203 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1204 {
1205         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1206                 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1207                         _active_texts[i].clear_before (time);
1208                 }
1209         }
1210
1211         auto subtitles = open_subtitles_for_frame (time);
1212         if (subtitles) {
1213                 pv->set_text (subtitles.get ());
1214         }
1215
1216         Video (pv, time);
1217 }
1218
1219
1220 void
1221 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1222 {
1223         /* Log if the assert below is about to fail */
1224         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1225                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1226         }
1227
1228         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1229         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1230         Audio (data, time, _film->audio_frame_rate());
1231         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1232 }
1233
1234
1235 void
1236 Player::fill_audio (DCPTimePeriod period)
1237 {
1238         if (period.from == period.to) {
1239                 return;
1240         }
1241
1242         DCPOMATIC_ASSERT (period.from < period.to);
1243
1244         DCPTime t = period.from;
1245         while (t < period.to) {
1246                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1247                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1248                 if (samples) {
1249                         auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1250                         silence->make_silent ();
1251                         emit_audio (silence, t);
1252                 }
1253                 t += block;
1254         }
1255 }
1256
1257
1258 DCPTime
1259 Player::one_video_frame () const
1260 {
1261         return DCPTime::from_frames (1, _film->video_frame_rate ());
1262 }
1263
1264
1265 pair<shared_ptr<AudioBuffers>, DCPTime>
1266 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1267 {
1268         auto const discard_time = discard_to - time;
1269         auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1270         auto remaining_frames = audio->frames() - discard_frames;
1271         if (remaining_frames <= 0) {
1272                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1273         }
1274         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1275         return make_pair(cut, time + discard_time);
1276 }
1277
1278
/** Set the decode resolution reduction used for DCP content and rebuild the pieces.
 *  Emits PENDING, then either CANCELLED (if the value is unchanged) or DONE.
 *  @param reduction Number of resolution halvings, or none for full resolution.
 */
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* Unlock before emitting so that slots which call back into the
			   player do not deadlock on _mutex.
			*/
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		setup_pieces_unlocked ();
	}

	/* DONE is emitted outside the lock for the same reason as above */
	Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1299
1300
1301 optional<DCPTime>
1302 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1303 {
1304         boost::mutex::scoped_lock lm (_mutex);
1305
1306         for (auto i: _pieces) {
1307                 if (i->content == content) {
1308                         return content_time_to_dcp (i, t);
1309                 }
1310         }
1311
1312         /* We couldn't find this content; perhaps things are being changed over */
1313         return {};
1314 }
1315
1316
1317 shared_ptr<const Playlist>
1318 Player::playlist () const
1319 {
1320         return _playlist ? _playlist : _film->playlist();
1321 }
1322
1323
1324 void
1325 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1326 {
1327         Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
1328 }
1329