Changes to crop can be handled with a reset_metadata().
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "atmos_decoder.h"
22 #include "player.h"
23 #include "film.h"
24 #include "audio_buffers.h"
25 #include "content_audio.h"
26 #include "dcp_content.h"
27 #include "dcpomatic_log.h"
28 #include "job.h"
29 #include "image.h"
30 #include "raw_image_proxy.h"
31 #include "ratio.h"
32 #include "log.h"
33 #include "render_text.h"
34 #include "config.h"
35 #include "content_video.h"
36 #include "player_video.h"
37 #include "frame_rate_change.h"
38 #include "audio_processor.h"
39 #include "playlist.h"
40 #include "referenced_reel_asset.h"
41 #include "decoder_factory.h"
42 #include "decoder.h"
43 #include "video_decoder.h"
44 #include "audio_decoder.h"
45 #include "text_content.h"
46 #include "text_decoder.h"
47 #include "ffmpeg_content.h"
48 #include "audio_content.h"
49 #include "dcp_decoder.h"
50 #include "image_decoder.h"
51 #include "compose.hpp"
52 #include "shuffler.h"
53 #include "timer.h"
54 #include <dcp/reel.h>
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <boost/foreach.hpp>
60 #include <stdint.h>
61 #include <algorithm>
62 #include <iostream>
63
64 #include "i18n.h"
65
66 using std::list;
67 using std::cout;
68 using std::min;
69 using std::max;
70 using std::min;
71 using std::vector;
72 using std::pair;
73 using std::map;
74 using std::make_pair;
75 using std::copy;
76 using boost::shared_ptr;
77 using boost::weak_ptr;
78 using boost::dynamic_pointer_cast;
79 using boost::optional;
80 using boost::scoped_ptr;
81 #if BOOST_VERSION >= 106100
82 using namespace boost::placeholders;
83 #endif
84 using namespace dcpomatic;
85
86 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
87 int const PlayerProperty::PLAYLIST = 701;
88 int const PlayerProperty::FILM_CONTAINER = 702;
89 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
90 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
91 int const PlayerProperty::PLAYBACK_LENGTH = 705;
92
/** Construct a Player which plays the whole of a film's own playlist.
 *  @param film Film to play.
 */
Player::Player (shared_ptr<const Film> film)
	: _film (film)
	, _suspended (0)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _tolerant (film->tolerant())
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	construct ();
}
108
/** Construct a Player which plays a given playlist rather than the film's own.
 *  @param film Film providing overall settings (frame rates etc.).
 *  @param playlist_ Playlist whose content should be played.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _tolerant (film->tolerant())
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	construct ();
}
125
/** Shared part of the two constructors: connect to film/playlist change
 *  signals, set the initial video container size, build our pieces and seek
 *  to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor (if any) via the normal change path */
	film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}
142
Player::~Player ()
{
	/* _shuffler is a raw pointer owned by us; it is (re)allocated in setup_pieces_unlocked() */
	delete _shuffler;
}
147
/** Thread-safe wrapper around setup_pieces_unlocked() */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
154
155
156 bool
157 have_video (shared_ptr<const Content> content)
158 {
159         return static_cast<bool>(content->video) && content->video->use();
160 }
161
162 bool
163 have_audio (shared_ptr<const Content> content)
164 {
165         return static_cast<bool>(content->audio);
166 }
167
/** Rebuild _pieces from the playlist: create (or re-use) a decoder for each
 *  piece of content and wire its output signals into the player.  Also resets
 *  playback state (black/silence fillers and last-emitted times).
 *  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so that decoders can be re-used below */
	list<shared_ptr<Piece> > old_pieces = _pieces;
	_pieces.clear ();

	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {

		if (!i->paths_valid ()) {
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Re-use the decoder from an old piece for the same content, if there was one */
		shared_ptr<Decoder> old_decoder;
		BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}

		shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
				i->set_ignore (true);
			}
		}

		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		/* Connect decoder outputs; pieces are passed as weak_ptr so that a
		   signal arriving after a rebuild does not keep an old piece alive.
		*/
		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	/* Record the starting position of every audio stream */
	_stream_states.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Fillers for the periods with no video / no audio */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();
}
279
/** React to a change in one piece of our playlist's content.
 *  @param type Stage of the change (pending / done / cancelled).
 *  @param property Property which changed.
 *  @param frequent true if the change is likely to happen often.
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (property == VideoContentProperty::CROP) {
		/* Crop changes do not need a rebuild of the pieces: it is enough to
		   update the metadata of any video frames we are currently delaying.
		*/
		if (type == CHANGE_TYPE_DONE) {
			dcp::Size const vcs = video_container_size();
			boost::mutex::scoped_lock lm (_mutex);
			for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
				i->first->reset_metadata (_film, vcs);
			}
		}
	} else {
		if (type == CHANGE_TYPE_PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces.  Stop pass()
			   and seek() from working until then.
			*/
			++_suspended;
		} else if (type == CHANGE_TYPE_DONE) {
			/* A change in our content has gone through.  Re-build our pieces. */
			setup_pieces ();
			--_suspended;
		} else if (type == CHANGE_TYPE_CANCELLED) {
			/* Balance the increment made when the change was PENDING */
			--_suspended;
		}
	}

	/* Pass the change on to our own observers */
	Change (type, property, frequent);
}
309
/** Set the size of the container into which video will be placed, rebuilding
 *  our black frame to match.  Emits Change with
 *  PlayerProperty::VIDEO_CONTAINER_SIZE (CANCELLED if the size is unchanged).
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No real change.  Unlock before emitting, matching the DONE
			   emission below (presumably so that handlers can call back into
			   the player without deadlocking — confirm).
			*/
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Re-make the black frame at the new size */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
332
/** React to a change in the playlist itself: rebuild our pieces once the
 *  change is complete, then pass the change on via our Change signal.
 */
void
Player::playlist_change (ChangeType type)
{
	if (type == CHANGE_TYPE_DONE) {
		setup_pieces ();
	}
	Change (type, PlayerProperty::PLAYLIST, false);
}
341
342 void
343 Player::film_change (ChangeType type, Film::Property p)
344 {
345         /* Here we should notice Film properties that affect our output, and
346            alert listeners that our output now would be different to how it was
347            last time we were run.
348         */
349
350         if (p == Film::CONTAINER) {
351                 Change (type, PlayerProperty::FILM_CONTAINER, false);
352         } else if (p == Film::VIDEO_FRAME_RATE) {
353                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
354                    so we need new pieces here.
355                 */
356                 if (type == CHANGE_TYPE_DONE) {
357                         setup_pieces ();
358                 }
359                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
360         } else if (p == Film::AUDIO_PROCESSOR) {
361                 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
362                         boost::mutex::scoped_lock lm (_mutex);
363                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
364                 }
365         } else if (p == Film::AUDIO_CHANNELS) {
366                 if (type == CHANGE_TYPE_DONE) {
367                         boost::mutex::scoped_lock lm (_mutex);
368                         _audio_merger.clear ();
369                 }
370         }
371 }
372
/** @return A black frame, sized to the current video container, for the given eyes */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return shared_ptr<PlayerVideo> (
		new PlayerVideo (
			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
			Crop (),
			optional<double> (),
			_video_container_size,
			_video_container_size,
			eyes,
			PART_WHOLE,
			PresetColourConversion::all().front().conversion,
			VIDEO_RANGE_FULL,
			boost::weak_ptr<Content>(),
			boost::optional<Frame>(),
			false
		)
	);
}
393
/** @return Index of the video frame within this piece's content which
 *  corresponds to DCP time t, clamped to the piece's trimmed extent.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
410
411 DCPTime
412 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
413 {
414         /* See comment in dcp_to_content_video */
415         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
416         return d + piece->content->position();
417 }
418
419 Frame
420 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
421 {
422         DCPTime s = t - piece->content->position ();
423         s = min (piece->content->length_after_trim(_film), s);
424         /* See notes in dcp_to_content_video */
425         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
426 }
427
428 DCPTime
429 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
430 {
431         /* See comment in dcp_to_content_video */
432         return DCPTime::from_frames (f, _film->audio_frame_rate())
433                 - DCPTime (piece->content->trim_start(), piece->frc)
434                 + piece->content->position();
435 }
436
437 ContentTime
438 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
439 {
440         DCPTime s = t - piece->content->position ();
441         s = min (piece->content->length_after_trim(_film), s);
442         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
443 }
444
445 DCPTime
446 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
447 {
448         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
449 }
450
451 list<shared_ptr<Font> >
452 Player::get_subtitle_fonts ()
453 {
454         boost::mutex::scoped_lock lm (_mutex);
455
456         list<shared_ptr<Font> > fonts;
457         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
458                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
459                         /* XXX: things may go wrong if there are duplicate font IDs
460                            with different font files.
461                         */
462                         list<shared_ptr<Font> > f = j->fonts ();
463                         copy (f.begin(), f.end(), back_inserter (fonts));
464                 }
465         }
466
467         return fonts;
468 }
469
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_video = true;
	/* Rebuild pieces so that their decoders are told to ignore video */
	setup_pieces_unlocked ();
}
478
/** Set this player never to produce any audio data */
void
Player::set_ignore_audio ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_audio = true;
	/* Rebuild pieces so that their decoders are told to ignore audio */
	setup_pieces_unlocked ();
}
486
/** Set this player never to produce any text (subtitle/caption) data */
void
Player::set_ignore_text ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_text = true;
	/* Rebuild pieces so that their text decoders are told to ignore text */
	setup_pieces_unlocked ();
}
494
/** Set the player to always burn open texts into the image regardless of the content settings.
 *  No piece rebuild is needed: the flag is read when subtitles are collected for each frame.
 */
void
Player::set_always_burn_open_subtitles ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_always_burn_open_subtitles = true;
}
502
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_fast = true;
	/* Rebuild pieces so the decoder factory sees the new flag */
	setup_pieces_unlocked ();
}
511
/** Set the player to decode and play DCP content which would otherwise be
 *  referenced (and therefore skipped).
 */
void
Player::set_play_referenced ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	/* Rebuild pieces so DCP decoders are told to decode referenced content */
	setup_pieces_unlocked ();
}
519
/** Apply trims to a reel asset and, if anything remains afterwards, add it to a list.
 *  Note that this mutates the asset's entry point and duration.
 *  @param a List to add to.
 *  @param r Asset to consider; must not be null.
 *  @param reel_trim_start Frames to trim from the start of this asset.
 *  @param reel_trim_end Frames to trim from the end of this asset.
 *  @param from Time at which the trimmed asset starts in the DCP being made.
 *  @param ffr Film video frame rate.
 */
static void
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
{
	DCPOMATIC_ASSERT (r);
	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
	if (r->actual_duration() > 0) {
		a.push_back (
			ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
			);
	}
}
532
/** @return Reel assets from DCP content which are marked to be referenced
 *  (rather than re-encoded), with their trims applied.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			/* Only DCP content can be referenced */
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* NOTE(review): if this DCP cannot be read we return whatever has
			   been gathered so far rather than reporting an error — confirm
			   that this is intended.
			*/
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_CLOSED_CAPTION)) {
				BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
603
/** Try to make progress: ask whichever decoder (or black/silence filler) is
 *  farthest behind to emit some data, then emit any audio which is now
 *  fully assembled.
 *  @return true if playback has finished and there is nothing more to do.
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			/* This piece's decoder has gone past the end of its content */
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What we will do on this pass */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Prefer black/silence fillers if they are behind all real content */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
		earliest_content->done = earliest_content->decoder->pass ();
		shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _playback_length;
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Playback is finished; flush out anything still held back */
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
762
763 /** @return Open subtitles for the frame at the given time, converted to images */
764 optional<PositionImage>
765 Player::open_subtitles_for_frame (DCPTime time) const
766 {
767         list<PositionImage> captions;
768         int const vfr = _film->video_frame_rate();
769
770         BOOST_FOREACH (
771                 PlayerText j,
772                 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
773                 ) {
774
775                 /* Bitmap subtitles */
776                 BOOST_FOREACH (BitmapText i, j.bitmap) {
777                         if (!i.image) {
778                                 continue;
779                         }
780
781                         /* i.image will already have been scaled to fit _video_container_size */
782                         dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
783
784                         captions.push_back (
785                                 PositionImage (
786                                         i.image,
787                                         Position<int> (
788                                                 lrint (_video_container_size.width * i.rectangle.x),
789                                                 lrint (_video_container_size.height * i.rectangle.y)
790                                                 )
791                                         )
792                                 );
793                 }
794
795                 /* String subtitles (rendered to an image) */
796                 if (!j.string.empty ()) {
797                         list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
798                         copy (s.begin(), s.end(), back_inserter (captions));
799                 }
800         }
801
802         if (captions.empty ()) {
803                 return optional<PositionImage> ();
804         }
805
806         return merge (captions);
807 }
808
/** Handler for video data arriving from a decoder.
 *  @param wp Weak pointer to the Piece that the data came from.
 *  @param video The video frame, with its frame index within the content.
 *
 *  Discards frames that arrive before the content's period or before the last
 *  accurate seek; fills any gap between the last emitted frame and this one
 *  (repeating the previous frame from this piece, or black); then emits this
 *  frame, repeated as required by any frame-rate change.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away since the decode started; nothing to do */
		return;
	}

	if (!piece->content->video->use()) {
		return;
	}

	FrameRateChange frc (_film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		/* frc.skip means alternate content frames are dropped for the rate conversion */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			LastVideoMap::const_iterator last = _last_video.find (wp);
			if (_film->three_d()) {
				/* In 3D we fill eye-by-eye, starting from the eye after the last
				   one emitted and stopping at the eye of the incoming frame.
				*/
				Eyes fill_to_eyes = video.eyes;
				if (fill_to_eyes == EYES_BOTH) {
					fill_to_eyes = EYES_LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = EYES_LEFT;
				}
				DCPTime j = fill_from;
				Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
				if (eyes == EYES_BOTH) {
					eyes = EYES_LEFT;
				}
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						/* Repeat the last frame we saw from this piece */
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					/* Time only advances once both eyes of a frame have been emitted */
					if (eyes == EYES_RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: one fill frame per video frame time */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(EYES_BOTH), j);
					}
				}
			}
		}
	}

	/* Remember this frame so it can be repeated for any future gap-filling from this piece */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (_film, video.frame),
			scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content->video->range(),
			piece->content,
			video.frame,
			false
			)
		);

	/* Emit the frame, repeated as required by the frame-rate change, but never
	   beyond the end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
915
/** Handler for audio data arriving from a decoder.
 *  @param wp Weak pointer to the Piece that the data came from.
 *  @param stream The audio stream within the piece that the data belongs to.
 *  @param content_audio The audio data, with its frame index within the content.
 *
 *  Trims the data to the content's period, then applies gain, channel
 *  remapping and any audio processor before pushing it into the merger.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away since the decode started; nothing to do */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* The block straddles the end of the content; keep only the part inside */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy the data before applying gain, presumably so the decoder's
		   buffers are not modified in place — TODO confirm.
		*/
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
984
/** Handler for the start of a bitmap subtitle/caption arriving from a decoder.
 *  @param wp Weak pointer to the Piece that the subtitle came from.
 *  @param wc Weak pointer to the TextContent that the subtitle belongs to.
 *  @param subtitle The bitmap and its rectangle, with times in content time.
 *
 *  Applies the content's offset and scale settings, scales the image to suit
 *  _video_container_size and records the subtitle as active from its start time.
 */
void
Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
{
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
		/* Content has gone away since the decode started; nothing to do */
		return;
	}

	/* Apply content's subtitle offsets */
	subtitle.sub.rectangle.x += text->x_offset ();
	subtitle.sub.rectangle.y += text->y_offset ();

	/* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
	subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
	subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);

	/* Apply content's subtitle scale */
	subtitle.sub.rectangle.width *= text->x_scale ();
	subtitle.sub.rectangle.height *= text->y_scale ();

	PlayerText ps;
	shared_ptr<Image> image = subtitle.sub.image;

	/* We will scale the subtitle up to fit _video_container_size */
	int const width = subtitle.sub.rectangle.width * _video_container_size.width;
	int const height = subtitle.sub.rectangle.height * _video_container_size.height;
	if (width == 0 || height == 0) {
		/* Nothing visible to show */
		return;
	}

	dcp::Size scaled_size (width, height);
	ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
	DCPTime from (content_time_to_dcp (piece, subtitle.from()));

	_active_texts[text->type()].add_from (wc, ps, from);
}
1022
/** Handler for the start of some string (text) subtitles/captions arriving from a decoder.
 *  @param wp Weak pointer to the Piece that the subtitles came from.
 *  @param wc Weak pointer to the TextContent that the subtitles belong to.
 *  @param subtitle The subtitle strings, with times in content time.
 *
 *  Applies the content's offset/scale settings to each string and records the
 *  result as active from its start time.
 */
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
		/* Content has gone away since the decode started; nothing to do */
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	if (from > piece->content->end(_film)) {
		/* Starts after the content's period; ignore */
		return;
	}

	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling.
		   NOTE(review): the expression below equals max(xs, ys), so with
		   xs = ys = 0.5 the size is *halved*; the previous comment here
		   claimed the opposite ("we scale size by 2") — confirm intent.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* 1000 is presumably the time-code rate for dcp::Time — TODO confirm */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[text->type()].add_from (wc, ps, from);
}
1066
1067 void
1068 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1069 {
1070         shared_ptr<const TextContent> text = wc.lock ();
1071         if (!text) {
1072                 return;
1073         }
1074
1075         if (!_active_texts[text->type()].have(wc)) {
1076                 return;
1077         }
1078
1079         shared_ptr<Piece> piece = wp.lock ();
1080         if (!piece) {
1081                 return;
1082         }
1083
1084         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1085
1086         if (dcp_to > piece->content->end(_film)) {
1087                 return;
1088         }
1089
1090         pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1091
1092         bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1093         if (text->use() && !always && !text->burn()) {
1094                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1095         }
1096 }
1097
/** Seek the player to a given DCP time.
 *  @param time Time to seek to.
 *  @param accurate true to seek to exactly this time; false to allow the
 *  decoders to seek approximately (faster).
 *
 *  Clears all buffered/queued state and seeks each piece's decoder as
 *  appropriate for its position relative to time.  Ignored if the player is
 *  suspended.
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	if (_shuffler) {
		_shuffler->clear ();
	}

	/* Throw away any video frames waiting in the subtitle-ordering delay queue */
	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();
	}

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* We know exactly where the next data will come from, so anything
		   arriving earlier than this can be discarded.
		*/
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	} else {
		/* We don't know exactly where the decoders will resume, so we can't
		   constrain what arrives next.
		*/
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget remembered frames; they are no longer suitable for gap-filling */
	_last_video.clear ();
}
1158
1159 void
1160 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1161 {
1162         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1163            player before the video that requires them.
1164         */
1165         _delay.push_back (make_pair (pv, time));
1166
1167         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1168                 _last_video_time = time + one_video_frame();
1169         }
1170         _last_video_eyes = increment_eyes (pv->eyes());
1171
1172         if (_delay.size() < 3) {
1173                 return;
1174         }
1175
1176         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1177         _delay.pop_front();
1178         do_emit_video (to_do.first, to_do.second);
1179 }
1180
1181 void
1182 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1183 {
1184         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1185                 for (int i = 0; i < TEXT_COUNT; ++i) {
1186                         _active_texts[i].clear_before (time);
1187                 }
1188         }
1189
1190         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1191         if (subtitles) {
1192                 pv->set_text (subtitles.get ());
1193         }
1194
1195         Video (pv, time);
1196 }
1197
1198 void
1199 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1200 {
1201         /* Log if the assert below is about to fail */
1202         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1203                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1204         }
1205
1206         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1207         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1208         Audio (data, time, _film->audio_frame_rate());
1209         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1210 }
1211
1212 void
1213 Player::fill_audio (DCPTimePeriod period)
1214 {
1215         if (period.from == period.to) {
1216                 return;
1217         }
1218
1219         DCPOMATIC_ASSERT (period.from < period.to);
1220
1221         DCPTime t = period.from;
1222         while (t < period.to) {
1223                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1224                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1225                 if (samples) {
1226                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1227                         silence->make_silent ();
1228                         emit_audio (silence, t);
1229                 }
1230                 t += block;
1231         }
1232 }
1233
1234 DCPTime
1235 Player::one_video_frame () const
1236 {
1237         return DCPTime::from_frames (1, _film->video_frame_rate ());
1238 }
1239
1240 pair<shared_ptr<AudioBuffers>, DCPTime>
1241 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1242 {
1243         DCPTime const discard_time = discard_to - time;
1244         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1245         Frame remaining_frames = audio->frames() - discard_frames;
1246         if (remaining_frames <= 0) {
1247                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1248         }
1249         shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1250         return make_pair(cut, time + discard_time);
1251 }
1252
/** Set _dcp_decode_reduction and rebuild the pieces, notifying observers via
 *  the Change signal.
 *  @param reduction New reduction value, or empty for none.
 */
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	/* Change signals are always emitted with the mutex NOT held */
	Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change; unlock before emitting CANCELLED */
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		setup_pieces_unlocked ();
	}

	Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1273
1274 optional<DCPTime>
1275 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1276 {
1277         boost::mutex::scoped_lock lm (_mutex);
1278
1279         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1280                 if (i->content == content) {
1281                         return content_time_to_dcp (i, t);
1282                 }
1283         }
1284
1285         /* We couldn't find this content; perhaps things are being changed over */
1286         return optional<DCPTime>();
1287 }
1288
1289
1290 shared_ptr<const Playlist>
1291 Player::playlist () const
1292 {
1293         return _playlist ? _playlist : _film->playlist();
1294 }
1295
1296
1297 void
1298 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1299 {
1300         Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
1301 }
1302