wip: encoding; crashes on startup.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "atmos_decoder.h"
22 #include "player.h"
23 #include "film.h"
24 #include "audio_buffers.h"
25 #include "content_audio.h"
26 #include "dcp_content.h"
27 #include "dcpomatic_log.h"
28 #include "job.h"
29 #include "image.h"
30 #include "raw_image_proxy.h"
31 #include "ratio.h"
32 #include "log.h"
33 #include "render_text.h"
34 #include "config.h"
35 #include "content_video.h"
36 #include "player_video.h"
37 #include "frame_rate_change.h"
38 #include "audio_processor.h"
39 #include "playlist.h"
40 #include "referenced_reel_asset.h"
41 #include "decoder_factory.h"
42 #include "decoder.h"
43 #include "video_decoder.h"
44 #include "audio_decoder.h"
45 #include "text_content.h"
46 #include "text_decoder.h"
47 #include "ffmpeg_content.h"
48 #include "audio_content.h"
49 #include "dcp_decoder.h"
50 #include "image_decoder.h"
51 #include "compose.hpp"
52 #include "shuffler.h"
53 #include "timer.h"
54 #include <dcp/reel.h>
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <boost/foreach.hpp>
60 #include <stdint.h>
61 #include <algorithm>
62 #include <iostream>
63
64 #include "i18n.h"
65
66 using std::list;
67 using std::cout;
68 using std::min;
69 using std::max;
70 using std::min;
71 using std::vector;
72 using std::pair;
73 using std::map;
74 using std::make_pair;
75 using std::copy;
76 using boost::shared_ptr;
77 using boost::weak_ptr;
78 using boost::dynamic_pointer_cast;
79 using boost::optional;
80 using boost::scoped_ptr;
81 using namespace dcpomatic;
82
/* Identifiers passed as the `property' argument of Player::Change when the
   corresponding aspect of the player's output changes.  The 700 range is
   presumably chosen to avoid clashing with other property IDs used with
   this signal — TODO confirm against Film/Content property values.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
89
/** Construct a Player which will play the whole content of @a film.
 *  All the real setup is shared with the other constructor via construct().
 */
Player::Player (shared_ptr<const Film> film)
        : _film (film)
        , _suspended (0)
        , _ignore_video (false)
        , _ignore_audio (false)
        , _ignore_text (false)
        , _always_burn_open_subtitles (false)
        , _fast (false)
        , _tolerant (film->tolerant())
        , _play_referenced (false)
        , _audio_merger (_film->audio_frame_rate())
        , _shuffler (0)
{
        construct ();
}
105
/** Construct a Player which will play a particular @a playlist_ rather than
 *  the whole of @a film's content; playlist() falls back to the film's own
 *  playlist when _playlist is not set.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
        : _film (film)
        , _playlist (playlist_)
        , _suspended (0)
        , _ignore_video (false)
        , _ignore_audio (false)
        , _ignore_text (false)
        , _always_burn_open_subtitles (false)
        , _fast (false)
        , _tolerant (film->tolerant())
        , _play_referenced (false)
        , _audio_merger (_film->audio_frame_rate())
        , _shuffler (0)
{
        construct ();
}
122
/** Shared constructor body: wire up change signals from the film and
 *  playlist, size the output, build the initial pieces and seek to the
 *  start so the player is ready to pass().
 */
void
Player::construct ()
{
        _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
        /* The butler must hear about this first, so since we are proxying this through to the butler we must
           be first.
        */
        _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
        _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
        set_video_container_size (_film->frame_size ());

        /* Pick up the film's audio processor (if any) as if it had just changed */
        film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);

        setup_pieces ();
        /* Accurate seek to zero primes the player's position state */
        seek (DCPTime (), true);
}
139
Player::~Player ()
{
        /* _shuffler is a raw owning pointer (recreated in setup_pieces_unlocked) */
        delete _shuffler;
}
144
/** Thread-safe wrapper: take _mutex and rebuild the pieces */
void
Player::setup_pieces ()
{
        boost::mutex::scoped_lock lm (_mutex);
        setup_pieces_unlocked ();
}
151
152
153 bool
154 have_video (shared_ptr<const Content> content)
155 {
156         return static_cast<bool>(content->video) && content->video->use();
157 }
158
159 bool
160 have_audio (shared_ptr<const Content> content)
161 {
162         return static_cast<bool>(content->audio);
163 }
164
/** Rebuild _pieces (content/decoder pairs) from the playlist, re-using old
 *  decoders where the content is unchanged, and reset position state.
 *  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
        _playback_length = _playlist ? _playlist->length(_film) : _film->length();

        /* Keep the old pieces around so their decoders can be re-used below */
        list<shared_ptr<Piece> > old_pieces = _pieces;
        _pieces.clear ();

        delete _shuffler;
        _shuffler = new Shuffler();
        _shuffler->Video.connect(bind(&Player::video, this, _1, _2));

        BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {

                if (!i->paths_valid ()) {
                        continue;
                }

                if (_ignore_video && _ignore_audio && i->text.empty()) {
                        /* We're only interested in text and this content has none */
                        continue;
                }

                /* Find any decoder we had for this content before the rebuild */
                shared_ptr<Decoder> old_decoder;
                BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
                        if (j->content == i) {
                                old_decoder = j->decoder;
                                break;
                        }
                }

                shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
                DCPOMATIC_ASSERT (decoder);

                FrameRateChange frc (_film, i);

                if (decoder->video && _ignore_video) {
                        decoder->video->set_ignore (true);
                }

                if (decoder->audio && _ignore_audio) {
                        decoder->audio->set_ignore (true);
                }

                if (_ignore_text) {
                        BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
                                i->set_ignore (true);
                        }
                }

                shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
                if (dcp) {
                        dcp->set_decode_referenced (_play_referenced);
                        if (_play_referenced) {
                                dcp->set_forced_reduction (_dcp_decode_reduction);
                        }
                }

                shared_ptr<Piece> piece (new Piece (i, decoder, frc));
                _pieces.push_back (piece);

                /* Connect decoder outputs to our handlers; pieces are passed as
                   weak_ptr so a stale connection cannot keep a Piece alive.
                */
                if (decoder->video) {
                        if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
                                /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
                                decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
                        } else {
                                decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
                        }
                }

                if (decoder->audio) {
                        decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
                }

                list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

                while (j != decoder->text.end()) {
                        (*j)->BitmapStart.connect (
                                bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
                                );
                        (*j)->PlainStart.connect (
                                bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
                                );
                        (*j)->Stop.connect (
                                bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
                                );

                        ++j;
                }

                if (decoder->atmos) {
                        decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
                }
        }

        /* Record, per audio stream, the piece it belongs to and its start position;
           pass() uses last_push_end from these to decide how much audio to emit.
        */
        _stream_states.clear ();
        BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
                if (i->content->audio) {
                        BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
                                _stream_states[j] = StreamState (i, i->content->position ());
                        }
                }
        }

        /* Periods where there is no video (fill with black) or no audio (fill with silence) */
        _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
        _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

        _last_video_time = DCPTime ();
        _last_video_eyes = EYES_BOTH;
        _last_audio_time = DCPTime ();
}
276
277 void
278 Player::playlist_content_change (ChangeType type, int property, bool frequent)
279 {
280         if (type == CHANGE_TYPE_PENDING) {
281                 /* The player content is probably about to change, so we can't carry on
282                    until that has happened and we've rebuilt our pieces.  Stop pass()
283                    and seek() from working until then.
284                 */
285                 ++_suspended;
286         } else if (type == CHANGE_TYPE_DONE) {
287                 /* A change in our content has gone through.  Re-build our pieces. */
288                 setup_pieces ();
289                 --_suspended;
290         } else if (type == CHANGE_TYPE_CANCELLED) {
291                 --_suspended;
292         }
293
294         Change (type, property, frequent);
295 }
296
/** Set the size of the video `container' (the area our output is scaled into),
 *  rebuilding the black frame to match.  Emits PENDING then DONE (or CANCELLED
 *  if the size is unchanged).
 */
void
Player::set_video_container_size (dcp::Size s)
{
        Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

        {
                boost::mutex::scoped_lock lm (_mutex);

                if (s == _video_container_size) {
                        /* No change; unlock before emitting the signal — presumably so
                           slots may take _mutex themselves (TODO confirm).
                        */
                        lm.unlock ();
                        Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
                        return;
                }

                _video_container_size = s;

                /* Re-make the black frame at the new size */
                _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
                _black_image->make_black ();
        }

        Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
319
320 void
321 Player::playlist_change (ChangeType type)
322 {
323         if (type == CHANGE_TYPE_DONE) {
324                 setup_pieces ();
325         }
326         Change (type, PlayerProperty::PLAYLIST, false);
327 }
328
329 void
330 Player::film_change (ChangeType type, Film::Property p)
331 {
332         /* Here we should notice Film properties that affect our output, and
333            alert listeners that our output now would be different to how it was
334            last time we were run.
335         */
336
337         if (p == Film::CONTAINER) {
338                 Change (type, PlayerProperty::FILM_CONTAINER, false);
339         } else if (p == Film::VIDEO_FRAME_RATE) {
340                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
341                    so we need new pieces here.
342                 */
343                 if (type == CHANGE_TYPE_DONE) {
344                         setup_pieces ();
345                 }
346                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
347         } else if (p == Film::AUDIO_PROCESSOR) {
348                 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
349                         boost::mutex::scoped_lock lm (_mutex);
350                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
351                 }
352         } else if (p == Film::AUDIO_CHANNELS) {
353                 if (type == CHANGE_TYPE_DONE) {
354                         boost::mutex::scoped_lock lm (_mutex);
355                         _audio_merger.clear ();
356                 }
357         }
358 }
359
360 shared_ptr<PlayerVideo>
361 Player::black_player_video_frame (Eyes eyes) const
362 {
363         return shared_ptr<PlayerVideo> (
364                 new PlayerVideo (
365                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
366                         Crop (),
367                         optional<double> (),
368                         _video_container_size,
369                         _video_container_size,
370                         eyes,
371                         PART_WHOLE,
372                         PresetColourConversion::all().front().conversion,
373                         VIDEO_RANGE_FULL,
374                         boost::weak_ptr<Content>(),
375                         boost::optional<Frame>(),
376                         false
377                 )
378         );
379 }
380
/** Convert a DCP time @a t to a frame index within @a piece's video content,
 *  accounting for the content's position, trim and frame-rate change.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
        DCPTime s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(_film), s);
        s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

        /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
           then convert that ContentTime to frames at the content's rate.  However this fails for
           situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
           enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

           Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
        */
        return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
397
398 DCPTime
399 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
400 {
401         /* See comment in dcp_to_content_video */
402         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
403         return d + piece->content->position();
404 }
405
/** Convert a DCP time @a t to a frame index in @a piece's audio, after
 *  resampling to the film's audio frame rate.
 */
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
        DCPTime s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(_film), s);
        /* See notes in dcp_to_content_video */
        return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
}
414
415 DCPTime
416 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
417 {
418         /* See comment in dcp_to_content_video */
419         return DCPTime::from_frames (f, _film->audio_frame_rate())
420                 - DCPTime (piece->content->trim_start(), piece->frc)
421                 + piece->content->position();
422 }
423
/** Convert a DCP time @a t to a ContentTime within @a piece, clamping to the
 *  content's trimmed length and never returning a negative time.
 */
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
        DCPTime s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(_film), s);
        return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
431
432 DCPTime
433 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
434 {
435         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
436 }
437
438 list<shared_ptr<Font> >
439 Player::get_subtitle_fonts ()
440 {
441         boost::mutex::scoped_lock lm (_mutex);
442
443         list<shared_ptr<Font> > fonts;
444         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
445                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
446                         /* XXX: things may go wrong if there are duplicate font IDs
447                            with different font files.
448                         */
449                         list<shared_ptr<Font> > f = j->fonts ();
450                         copy (f.begin(), f.end(), back_inserter (fonts));
451                 }
452         }
453
454         return fonts;
455 }
456
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _ignore_video = true;
        /* Pieces must be rebuilt so decoders pick up the new ignore flag */
        setup_pieces_unlocked ();
}
465
/** Set this player never to produce any audio data */
void
Player::set_ignore_audio ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _ignore_audio = true;
        /* Pieces must be rebuilt so decoders pick up the new ignore flag */
        setup_pieces_unlocked ();
}
473
/** Set this player never to produce any text (subtitle/caption) data */
void
Player::set_ignore_text ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _ignore_text = true;
        /* Pieces must be rebuilt so decoders pick up the new ignore flag */
        setup_pieces_unlocked ();
}
481
/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
        boost::mutex::scoped_lock lm (_mutex);
        /* No piece rebuild needed: this flag is read when frames are emitted */
        _always_burn_open_subtitles = true;
}
489
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _fast = true;
        /* Pieces must be rebuilt since _fast is passed to decoder_factory */
        setup_pieces_unlocked ();
}
498
/** Set the player to decode and play content from referenced DCPs,
 *  rather than skipping it (as it would be when making a VF).
 */
void
Player::set_play_referenced ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _play_referenced = true;
        setup_pieces_unlocked ();
}
506
/** Trim reel asset @a r and add it to @a a if anything remains of it.
 *  Note that this modifies @a r in place (entry point and duration).
 *  @param a List of assets to add to.
 *  @param r Asset; must not be null.
 *  @param reel_trim_start Frames to trim from the start of this reel.
 *  @param reel_trim_end Frames to trim from the end of this reel.
 *  @param from Position of the trimmed asset in the DCP.
 *  @param ffr Film (DCP) video frame rate.
 */
static void
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
{
        DCPOMATIC_ASSERT (r);
        r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
        r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
        if (r->actual_duration() > 0) {
                a.push_back (
                        ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
                        );
        }
}
519
/** @return Reel assets from DCP content which is marked to be referenced
 *  (rather than re-encoded), each trimmed and positioned for the output DCP.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
        /* Does not require a lock on _mutex as it's only called from DCPEncoder */

        list<ReferencedReelAsset> a;

        BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
                shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
                if (!j) {
                        continue;
                }

                scoped_ptr<DCPDecoder> decoder;
                try {
                        decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
                } catch (...) {
                        /* NOTE(review): this returns whatever has been collected so far
                           rather than skipping just this content — confirm this is
                           intentional rather than a `continue'.
                        */
                        return a;
                }

                DCPOMATIC_ASSERT (j->video_frame_rate ());
                double const cfr = j->video_frame_rate().get();
                /* Trims expressed in frames at the content rate */
                Frame const trim_start = j->trim_start().frames_round (cfr);
                Frame const trim_end = j->trim_end().frames_round (cfr);
                int const ffr = _film->video_frame_rate ();

                /* position in the asset from the start */
                int64_t offset_from_start = 0;
                /* position in the asset from the end */
                int64_t offset_from_end = 0;
                BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
                        /* Assume that main picture duration is the length of the reel */
                        offset_from_end += k->main_picture()->actual_duration();
                }

                BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

                        /* Assume that main picture duration is the length of the reel */
                        int64_t const reel_duration = k->main_picture()->actual_duration();

                        /* See doc/design/trim_reels.svg */
                        Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
                        Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

                        DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
                        if (j->reference_video ()) {
                                maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
                        }

                        if (j->reference_audio ()) {
                                maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
                        }

                        if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
                                maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
                        }

                        if (j->reference_text (TEXT_CLOSED_CAPTION)) {
                                BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
                                        maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
                                }
                        }

                        offset_from_start += reel_duration;
                        offset_from_end -= reel_duration;
                }
        }

        return a;
}
590
/** Make the player emit some more data: one video frame, some audio, some
 *  text, or fill of black/silence, whichever is farthest behind.
 *  @return true when playback of the whole playlist is finished.
 */
bool
Player::pass ()
{
        boost::mutex::scoped_lock lm (_mutex);

        if (_suspended) {
                /* We can't pass in this state */
                LOG_DEBUG_PLAYER_NC ("Player is suspended");
                return false;
        }

        if (_playback_length == DCPTime()) {
                /* Special; just give one black frame */
                emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
                return true;
        }

        /* Find the decoder or empty which is farthest behind where we are and make it emit some data */

        shared_ptr<Piece> earliest_content;
        optional<DCPTime> earliest_time;

        BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
                if (i->done) {
                        continue;
                }

                /* Position of this piece's decoder, expressed in DCP time */
                DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
                if (t > i->content->end(_film)) {
                        i->done = true;
                } else {

                        /* Given two choices at the same time, pick the one with texts so we see it before
                           the video.
                        */
                        if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
                                earliest_time = t;
                                earliest_content = i;
                        }
                }
        }

        bool done = false;

        /* What to emit on this pass: real content, filler black/silence, or nothing */
        enum {
                NONE,
                CONTENT,
                BLACK,
                SILENT
        } which = NONE;

        if (earliest_content) {
                which = CONTENT;
        }

        if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
                earliest_time = _black.position ();
                which = BLACK;
        }

        if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
                earliest_time = _silent.position ();
                which = SILENT;
        }

        switch (which) {
        case CONTENT:
        {
                LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
                earliest_content->done = earliest_content->decoder->pass ();
                shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
                if (dcp && !_play_referenced && dcp->reference_audio()) {
                        /* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
                           to `hide' the fact that no audio was emitted during the referenced DCP (though
                           we need to behave as though it was).
                        */
                        _last_audio_time = dcp->end (_film);
                }
                break;
        }
        case BLACK:
                LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
                emit_video (black_player_video_frame(EYES_BOTH), _black.position());
                _black.set_position (_black.position() + one_video_frame());
                break;
        case SILENT:
        {
                LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
                DCPTimePeriod period (_silent.period_at_position());
                if (_last_audio_time) {
                        /* Sometimes the thing that happened last finishes fractionally before
                           or after this silence.  Bodge the start time of the silence to fix it.
                           I think this is nothing to worry about since we will just add or
                           remove a little silence at the end of some content.
                        */
                        int64_t const error = labs(period.from.get() - _last_audio_time->get());
                        /* Let's not worry about less than a frame at 24fps */
                        int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
                        if (error >= too_much_error) {
                                _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
                        }
                        DCPOMATIC_ASSERT (error < too_much_error);
                        period.from = *_last_audio_time;
                }
                /* Emit at most one video frame's worth of silence per pass */
                if (period.duration() > one_video_frame()) {
                        period.to = period.from + one_video_frame();
                }
                fill_audio (period);
                _silent.set_position (period.to);
                break;
        }
        case NONE:
                done = true;
                break;
        }

        /* Emit any audio that is ready */

        /* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
           of our streams, or the position of the _silent.
        */
        DCPTime pull_to = _playback_length;
        for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
                if (!i->second.piece->done && i->second.last_push_end < pull_to) {
                        pull_to = i->second.last_push_end;
                }
        }
        if (!_silent.done() && _silent.position() < pull_to) {
                pull_to = _silent.position();
        }

        LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
        list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
        for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
                if (_last_audio_time && i->second < *_last_audio_time) {
                        /* This new data comes before the last we emitted (or the last seek); discard it */
                        pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
                        if (!cut.first) {
                                continue;
                        }
                        *i = cut;
                } else if (_last_audio_time && i->second > *_last_audio_time) {
                        /* There's a gap between this data and the last we emitted; fill with silence */
                        fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
                }

                emit_audio (i->first, i->second);
        }

        if (done) {
                /* Playback has finished: flush out anything held in the shuffler and delay queue */
                _shuffler->flush ();
                for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
                        do_emit_video(i->first, i->second);
                }
        }

        return done;
}
749
/** @return Open subtitles for the frame at the given time, converted to images,
 *  merged into a single positioned image; or nothing if there are no subtitles
 *  to burn in at this time.
 */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
        list<PositionImage> captions;
        int const vfr = _film->video_frame_rate();

        /* Consider the one-video-frame period starting at `time' */
        BOOST_FOREACH (
                PlayerText j,
                _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
                ) {

                /* Bitmap subtitles */
                BOOST_FOREACH (BitmapText i, j.bitmap) {
                        if (!i.image) {
                                continue;
                        }

                        /* i.image will already have been scaled to fit _video_container_size */
                        dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

                        /* Rectangle coordinates are proportions of the container size */
                        captions.push_back (
                                PositionImage (
                                        i.image,
                                        Position<int> (
                                                lrint (_video_container_size.width * i.rectangle.x),
                                                lrint (_video_container_size.height * i.rectangle.y)
                                                )
                                        )
                                );
                }

                /* String subtitles (rendered to an image) */
                if (!j.string.empty ()) {
                        list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
                        copy (s.begin(), s.end(), back_inserter (captions));
                }
        }

        if (captions.empty ()) {
                return optional<PositionImage> ();
        }

        return merge (captions);
}
795
/** Handle a frame of video arriving from a piece's decoder: work out its DCP
 *  time, fill any gap since the last emitted frame (repeating the piece's
 *  previous frame, or using black) and then emit the new frame, repeated as
 *  required by the frame-rate change.
 *  @param wp Piece the video came from.
 *  @param video The video frame.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	if (!piece->content->video->use()) {
		return;
	}

	FrameRateChange frc (_film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		/* This frame-rate change drops odd-numbered content frames */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* Last frame we emitted from this piece, if any; used as fill material */
			LastVideoMap::const_iterator last = _last_video.find (wp);
			if (_film->three_d()) {
				/* In 3D we fill eye-by-eye; decide which eye we must stop filling at */
				Eyes fill_to_eyes = video.eyes;
				if (fill_to_eyes == EYES_BOTH) {
					fill_to_eyes = EYES_LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = EYES_LEFT;
				}
				DCPTime j = fill_from;
				Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
				if (eyes == EYES_BOTH) {
					eyes = EYES_LEFT;
				}
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					/* Time only advances once the right eye has gone out */
					if (eyes == EYES_RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: fill with repeats of this piece's last frame, or with black */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(EYES_BOTH), j);
					}
				}
			}
		}
	}

	/* Remember this frame so that it can be repeated to fill future gaps from this piece */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (_film, video.frame),
			scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content->video->range(),
			piece->content,
			video.frame,
			false
			)
		);

#ifdef DCPOMATIC_DEBUG
	_last_video[wp]->time = time;
#endif

	/* Emit the frame, repeated if the frame-rate change requires it, but never past the content's end */
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
906
/** Handle a block of audio arriving from a piece's decoder: trim it to the
 *  content's period, apply gain, remap it to the DCP channel layout, run any
 *  audio processor and push the result into the merger.
 *  @param wp Piece the audio came from.
 *  @param stream Stream within the piece's content.
 *  @param content_audio The audio data and its frame index.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* The block straddles the end of the content; keep only the part before the end */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy before applying gain so we don't modify the decoder's buffers */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap to the DCP's channel count using this stream's mapping */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push to the merger, recording how far this stream has now been pushed */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
975
976 void
977 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
978 {
979         shared_ptr<Piece> piece = wp.lock ();
980         shared_ptr<const TextContent> text = wc.lock ();
981         if (!piece || !text) {
982                 return;
983         }
984
985         /* Apply content's subtitle offsets */
986         subtitle.sub.rectangle.x += text->x_offset ();
987         subtitle.sub.rectangle.y += text->y_offset ();
988
989         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
990         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
991         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
992
993         /* Apply content's subtitle scale */
994         subtitle.sub.rectangle.width *= text->x_scale ();
995         subtitle.sub.rectangle.height *= text->y_scale ();
996
997         PlayerText ps;
998         shared_ptr<Image> image = subtitle.sub.image;
999
1000         /* We will scale the subtitle up to fit _video_container_size */
1001         int const width = subtitle.sub.rectangle.width * _video_container_size.width;
1002         int const height = subtitle.sub.rectangle.height * _video_container_size.height;
1003         if (width == 0 || height == 0) {
1004                 return;
1005         }
1006
1007         dcp::Size scaled_size (width, height);
1008         ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
1009         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
1010
1011         _active_texts[text->type()].add_from (wc, ps, from);
1012 }
1013
/** A set of plain-text (string) subtitles has started: apply the content's
 *  position and scale settings to each string and add them to the active texts.
 *  @param wp Piece the subtitle came from.
 *  @param wc Text content the subtitle belongs to.
 *  @param subtitle The string subtitles.
 */
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	if (from > piece->content->end(_film)) {
		return;
	}

	/* `s' is deliberately a copy, so these adjustments do not touch the decoder's data */
	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling:
		   1 / min (1 / xs, 1 / ys) is max (xs, ys), so e.g. if
		   xs = ys = 0.5 the size is halved.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* dcp::Time with a timecode rate of 1000, i.e. millisecond resolution */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[text->type()].add_from (wc, ps, from);
}
1057
1058 void
1059 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1060 {
1061         shared_ptr<const TextContent> text = wc.lock ();
1062         if (!text) {
1063                 return;
1064         }
1065
1066         if (!_active_texts[text->type()].have(wc)) {
1067                 return;
1068         }
1069
1070         shared_ptr<Piece> piece = wp.lock ();
1071         if (!piece) {
1072                 return;
1073         }
1074
1075         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1076
1077         if (dcp_to > piece->content->end(_film)) {
1078                 return;
1079         }
1080
1081         pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1082
1083         bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1084         if (text->use() && !always && !text->burn()) {
1085                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1086         }
1087 }
1088
/** Seek the player.
 *  @param time DCP time to seek to.
 *  @param accurate true to seek decoders exactly to @a time; false to allow
 *  them to stop at a convenient nearby point (which may be earlier).
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	/* Throw away anything buffered from before the seek */
	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();
	}

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* We know exactly where playback will resume, so gaps can be filled from `time' */
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	} else {
		/* We don't know exactly where the decoders will resume; leave these unset so
		   that the first data to arrive re-establishes them.
		*/
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget remembered frames; they are no longer adjacent to what we will emit */
	_last_video.clear ();
}
1149
1150 void
1151 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1152 {
1153         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1154            player before the video that requires them.
1155         */
1156         _delay.push_back (make_pair (pv, time));
1157
1158         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1159                 _last_video_time = time + one_video_frame();
1160         }
1161         _last_video_eyes = increment_eyes (pv->eyes());
1162
1163         if (_delay.size() < 3) {
1164                 return;
1165         }
1166
1167         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1168         _delay.pop_front();
1169         do_emit_video (to_do.first, to_do.second);
1170 }
1171
1172 void
1173 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1174 {
1175         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1176                 for (int i = 0; i < TEXT_COUNT; ++i) {
1177                         _active_texts[i].clear_before (time);
1178                 }
1179         }
1180
1181         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1182         if (subtitles) {
1183                 pv->set_text (subtitles.get ());
1184         }
1185
1186         Video (pv, time);
1187 }
1188
1189 void
1190 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1191 {
1192         /* Log if the assert below is about to fail */
1193         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1194                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1195         }
1196
1197         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1198         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1199         Audio (data, time, _film->audio_frame_rate());
1200         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1201 }
1202
1203 void
1204 Player::fill_audio (DCPTimePeriod period)
1205 {
1206         if (period.from == period.to) {
1207                 return;
1208         }
1209
1210         DCPOMATIC_ASSERT (period.from < period.to);
1211
1212         DCPTime t = period.from;
1213         while (t < period.to) {
1214                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1215                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1216                 if (samples) {
1217                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1218                         silence->make_silent ();
1219                         emit_audio (silence, t);
1220                 }
1221                 t += block;
1222         }
1223 }
1224
1225 DCPTime
1226 Player::one_video_frame () const
1227 {
1228         return DCPTime::from_frames (1, _film->video_frame_rate ());
1229 }
1230
1231 pair<shared_ptr<AudioBuffers>, DCPTime>
1232 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1233 {
1234         DCPTime const discard_time = discard_to - time;
1235         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1236         Frame remaining_frames = audio->frames() - discard_frames;
1237         if (remaining_frames <= 0) {
1238                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1239         }
1240         shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1241         return make_pair(cut, time + discard_time);
1242 }
1243
/** Set the reduction in resolution to use when decoding DCP content
 *  (none for full resolution; presumably a number of halvings — confirm
 *  against the DCP decoder), rebuilding the pieces so it takes effect.
 *  Emits PENDING then DONE Change signals, or CANCELLED if the value is
 *  unchanged.
 */
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change; unlock before emitting the signal so we don't hold
			   _mutex while arbitrary observers run.
			*/
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		setup_pieces_unlocked ();
	}

	Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1264
1265 optional<DCPTime>
1266 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1267 {
1268         boost::mutex::scoped_lock lm (_mutex);
1269
1270         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1271                 if (i->content == content) {
1272                         return content_time_to_dcp (i, t);
1273                 }
1274         }
1275
1276         /* We couldn't find this content; perhaps things are being changed over */
1277         return optional<DCPTime>();
1278 }
1279
1280
1281 shared_ptr<const Playlist>
1282 Player::playlist () const
1283 {
1284         return _playlist ? _playlist : _film->playlist();
1285 }
1286
1287
/** Forward Atmos data from a decoder straight out via the Atmos signal,
 *  converting its frame number to DCP time at the film's video frame rate.
 */
void
Player::atmos (weak_ptr<Piece>, ContentAtmos data)
{
	Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
}
1293