Cache film length for Player::pass.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2019 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_text.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
50 #include "shuffler.h"
51 #include "timer.h"
52 #include <dcp/reel.h>
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <dcp/reel_closed_caption_asset.h>
57 #include <boost/foreach.hpp>
58 #include <stdint.h>
59 #include <algorithm>
60 #include <iostream>
61
62 #include "i18n.h"
63
64 using std::list;
65 using std::cout;
66 using std::min;
67 using std::max;
68 using std::min;
69 using std::vector;
70 using std::pair;
71 using std::map;
72 using std::make_pair;
73 using std::copy;
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79 using namespace dcpomatic;
80
/* Numeric codes identifying which aspect of the Player changed, passed out
   with the Change signal.  Values start at 700; presumably chosen to avoid
   clashing with Film/Content property codes — TODO confirm against those enums. */
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
86
/** Construct a Player for a film/playlist: connect to change signals from
 *  both, build the initial pieces and seek to the start.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _suspended (false)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _tolerant (film->tolerant())
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor (if any) by faking a change notification */
	film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}
114
Player::~Player ()
{
	/* _shuffler is a raw owning pointer allocated in setup_pieces_unlocked() */
	delete _shuffler;
}
119
/** Take _mutex and rebuild our pieces; see setup_pieces_unlocked() for the work */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
126
127 bool
128 have_video (shared_ptr<Piece> piece)
129 {
130         return piece->decoder && piece->decoder->video;
131 }
132
133 bool
134 have_audio (shared_ptr<Piece> piece)
135 {
136         return piece->decoder && piece->decoder->audio;
137 }
138
/** Rebuild _pieces from the playlist's content, re-using decoders from the
 *  old pieces where possible.  Also rebuilds _stream_states, _black and
 *  _silent, resets the emission clocks and caches the film length.
 *  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	/* Keep the old pieces so that their decoders can be re-used below */
	list<shared_ptr<Piece> > old_pieces = _pieces;
	_pieces.clear ();

	/* Fresh Shuffler; its re-ordered output is fed back into Player::video */
	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

		if (!i->paths_valid ()) {
			/* Content whose files are missing; skip it */
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Find any old decoder for this content so decoder_factory can re-use it */
		shared_ptr<Decoder> old_decoder;
		BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}

		shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		FrameRateChange frc (_film, i);

		if (!decoder) {
			/* Not something that we can decode; e.g. Atmos content */
			continue;
		}

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
				i->set_ignore (true);
			}
		}

		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Forward text events (bitmap/plain starts and stops) from every text decoder */
		list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}
	}

	/* Record each audio stream's piece and start position so pass() can
	   work out how much audio is definitely complete. */
	_stream_states.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Periods with no video get black frames; periods with no audio get silence */
	_black = Empty (_film, _pieces, bind(&have_video, _1));
	_silent = Empty (_film, _pieces, bind(&have_audio, _1));

	/* Reset the emission clocks to the start of the DCP */
	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();

	/* Cached value to save recalculating it on every ::pass */
	_film_length = _film->length ();
}
250
/** Handle a change to some Content in our Playlist: suspend pass()/seek()
 *  while the change is pending, rebuild pieces when it is done, and proxy
 *  the notification on to our own observers via Change.
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (type == CHANGE_TYPE_PENDING) {
		boost::mutex::scoped_lock lm (_mutex);
		/* The player content is probably about to change, so we can't carry on
		   until that has happened and we've rebuilt our pieces.  Stop pass()
		   and seek() from working until then.
		*/
		_suspended = true;
	} else if (type == CHANGE_TYPE_DONE) {
		/* A change in our content has gone through.  Re-build our pieces. */
		setup_pieces ();
		/* NOTE(review): _suspended is cleared here without holding _mutex
		   (setup_pieces() takes and releases it internally) — looks benign
		   for a bool flag but worth confirming. */
		_suspended = false;
	} else if (type == CHANGE_TYPE_CANCELLED) {
		boost::mutex::scoped_lock lm (_mutex);
		_suspended = false;
	}

	Change (type, property, frequent);
}
272
/** Set the size of the container that our video will be put in, rebuilding
 *  the cached black frame to match.  Emits PENDING then DONE (or CANCELLED
 *  if the size is unchanged) on the Change signal.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No change; unlock before emitting so observers can call back into us */
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Re-make the black frame at the new size */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	/* Emitted outside the lock, like CANCELLED above */
	Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
295
/** Handle a change to the Playlist itself: rebuild pieces once the change
 *  is complete, and pass the notification on via Change.
 */
void
Player::playlist_change (ChangeType type)
{
	if (type == CHANGE_TYPE_DONE) {
		setup_pieces ();
	}
	Change (type, PlayerProperty::PLAYLIST, false);
}
304
/** Handle a change to a Film property that affects our output, updating our
 *  own state and alerting listeners where necessary.
 */
void
Player::film_change (ChangeType type, Film::Property p)
{
	/* Here we should notice Film properties that affect our output, and
	   alert listeners that our output now would be different to how it was
	   last time we were run.
	*/

	if (p == Film::CONTAINER) {
		Change (type, PlayerProperty::FILM_CONTAINER, false);
	} else if (p == Film::VIDEO_FRAME_RATE) {
		/* Pieces contain a FrameRateChange which contains the DCP frame rate,
		   so we need new pieces here.
		*/
		if (type == CHANGE_TYPE_DONE) {
			setup_pieces ();
		}
		Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
	} else if (p == Film::AUDIO_PROCESSOR) {
		if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
			/* Take a fresh clone of the film's processor at our audio rate */
			boost::mutex::scoped_lock lm (_mutex);
			_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
		}
	} else if (p == Film::AUDIO_CHANNELS) {
		if (type == CHANGE_TYPE_DONE) {
			/* Channel count changed; any merged-but-unemitted audio is stale */
			boost::mutex::scoped_lock lm (_mutex);
			_audio_merger.clear ();
		}
	}
}
335
/** @param eyes which eye(s) this frame is for.
 *  @return a black PlayerVideo frame (built from our cached _black_image)
 *  at the current container size, for filling gaps with no video content.
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return shared_ptr<PlayerVideo> (
		new PlayerVideo (
			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
			Crop (),
			optional<double> (),
			_video_container_size,
			_video_container_size,
			eyes,
			PART_WHOLE,
			PresetColourConversion::all().front().conversion,
			VIDEO_RANGE_FULL,
			boost::weak_ptr<Content>(),
			boost::optional<Frame>()
		)
	);
}
355
/** Convert a DCP time to a frame index within some piece's video content,
 *  clamping to the piece's trimmed extent.
 *  @param piece piece in question.
 *  @param t DCP time.
 *  @return video frame index within the content.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
372
373 DCPTime
374 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
375 {
376         /* See comment in dcp_to_content_video */
377         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
378         return d + piece->content->position();
379 }
380
/** Convert a DCP time to a frame index within some piece's resampled audio.
 *  @param piece piece in question.
 *  @param t DCP time.
 *  @return audio frame index at the film's audio frame rate.
 */
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
}
389
390 DCPTime
391 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
392 {
393         /* See comment in dcp_to_content_video */
394         return DCPTime::from_frames (f, _film->audio_frame_rate())
395                 - DCPTime (piece->content->trim_start(), piece->frc)
396                 + piece->content->position();
397 }
398
/** Convert a DCP time to a ContentTime within some piece, clamping to the
 *  piece's trimmed extent.
 *  @param piece piece in question.
 *  @param t DCP time.
 *  @return corresponding time within the content.
 */
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
406
407 DCPTime
408 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
409 {
410         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
411 }
412
413 list<shared_ptr<Font> >
414 Player::get_subtitle_fonts ()
415 {
416         boost::mutex::scoped_lock lm (_mutex);
417
418         list<shared_ptr<Font> > fonts;
419         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
420                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
421                         /* XXX: things may go wrong if there are duplicate font IDs
422                            with different font files.
423                         */
424                         list<shared_ptr<Font> > f = j->fonts ();
425                         copy (f.begin(), f.end(), back_inserter (fonts));
426                 }
427         }
428
429         return fonts;
430 }
431
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_video = true;
	/* Rebuild pieces so decoders pick up the new ignore flag */
	setup_pieces_unlocked ();
}
440
/** Set this player never to produce any audio data */
void
Player::set_ignore_audio ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_audio = true;
	/* Rebuild pieces so decoders pick up the new ignore flag */
	setup_pieces_unlocked ();
}
448
/** Set this player never to produce any text data */
void
Player::set_ignore_text ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_text = true;
	/* Rebuild pieces so text decoders pick up the new ignore flag */
	setup_pieces_unlocked ();
}
456
/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
	boost::mutex::scoped_lock lm (_mutex);
	/* No setup_pieces() needed: this flag is only read when rendering subtitles */
	_always_burn_open_subtitles = true;
}
464
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_fast = true;
	/* Rebuild pieces so decoder_factory() sees the new flag */
	setup_pieces_unlocked ();
}
473
/** Set the player to decode referenced DCP content rather than skipping it */
void
Player::set_play_referenced ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	/* Rebuild pieces so DCP decoders pick up the new flag */
	setup_pieces_unlocked ();
}
481
482 static void
483 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
484 {
485         DCPOMATIC_ASSERT (r);
486         r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
487         r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
488         if (r->actual_duration() > 0) {
489                 a.push_back (
490                         ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
491                         );
492         }
493 }
494
/** @return the assets from any DCP content in the playlist which is marked
 *  as referenced (i.e. to be used directly rather than re-encoded), with
 *  per-reel trims applied.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			/* Only DCP content can be referenced */
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* NOTE(review): this returns whatever has been gathered so far,
			   skipping all remaining content, rather than `continue'ing to
			   the next piece — presumably intentional, but worth confirming. */
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			/* DCP time at which this reel starts */
			DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_CLOSED_CAPTION)) {
				BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
565
/** Run one pass of the player: find whichever piece (or black/silent gap) is
 *  farthest behind, ask it to emit some data, then push out any audio which
 *  is definitely complete.
 *  @return true when there is nothing left to do (playback has finished).
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);
	/* _film_length is cached by setup_pieces_unlocked(), which the
	   constructor always calls, so it must be set by now. */
	DCPOMATIC_ASSERT (_film_length);

	if (_suspended) {
		/* We can't pass in this state */
		return false;
	}

	if (*_film_length == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			/* This piece's decoder has gone past the end of its content */
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What kind of thing will this pass emit */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* A pending black or silent gap earlier than the earliest content wins */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		earliest_content->done = earliest_content->decoder->pass ();
		shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		/* Emit one black frame and advance the black-gap position */
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = *_film_length;
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Flush anything still buffered in the shuffler and the delay line */
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
720
721 /** @return Open subtitles for the frame at the given time, converted to images */
722 optional<PositionImage>
723 Player::open_subtitles_for_frame (DCPTime time) const
724 {
725         list<PositionImage> captions;
726         int const vfr = _film->video_frame_rate();
727
728         BOOST_FOREACH (
729                 PlayerText j,
730                 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
731                 ) {
732
733                 /* Bitmap subtitles */
734                 BOOST_FOREACH (BitmapText i, j.bitmap) {
735                         if (!i.image) {
736                                 continue;
737                         }
738
739                         /* i.image will already have been scaled to fit _video_container_size */
740                         dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
741
742                         captions.push_back (
743                                 PositionImage (
744                                         i.image,
745                                         Position<int> (
746                                                 lrint (_video_container_size.width * i.rectangle.x),
747                                                 lrint (_video_container_size.height * i.rectangle.y)
748                                                 )
749                                         )
750                                 );
751                 }
752
753                 /* String subtitles (rendered to an image) */
754                 if (!j.string.empty ()) {
755                         list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
756                         copy (s.begin(), s.end(), back_inserter (captions));
757                 }
758         }
759
760         if (captions.empty ()) {
761                 return optional<PositionImage> ();
762         }
763
764         return merge (captions);
765 }
766
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	/* Handle a video frame decoded from the piece `wp': work out its DCP time,
	   fill any gap between the last frame we emitted and this one, then emit it
	   (possibly several times, if the frame rate change requires repeats).
	*/

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	FrameRateChange frc (_film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		/* The rate conversion says to drop every other source frame */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* Last frame we emitted for this piece, if any: we repeat it to fill
			   gaps where we can, rather than using black.
			*/
			LastVideoMap::const_iterator last = _last_video.find (wp);
			if (_film->three_d()) {
				/* 3D: fill eye-by-eye, so work out which eye we must stop at */
				Eyes fill_to_eyes = video.eyes;
				if (fill_to_eyes == EYES_BOTH) {
					fill_to_eyes = EYES_LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = EYES_LEFT;
				}
				DCPTime j = fill_from;
				Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
				if (eyes == EYES_BOTH) {
					eyes = EYES_LEFT;
				}
				/* Emit alternating eyes, advancing time after each right eye */
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						/* Repeat the last frame we have, re-labelled with the eye we need */
						shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						/* Nothing to repeat, so use black */
						emit_video (black_player_video_frame(eyes), j);
					}
					if (eyes == EYES_RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: fill a whole frame at a time */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(EYES_BOTH), j);
					}
				}
			}
		}
	}

	/* Remember this frame so that we can repeat it if we need to fill a later gap */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (_film, video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content->video->range(),
			piece->content,
			video.frame
			)
		);

	/* Emit the frame, repeated as many times as the frame rate change requires,
	   but never beyond the end of the content.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
867
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	/* Handle a block of audio decoded from the piece `wp': trim it to the
	   content's period, apply gain, remap channels and run any audio processor,
	   then push the result into the merger for later emission.
	*/

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Frame rate of this content once resampled for the film */
	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* The block overlaps the end of the content; keep only the part inside */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy before applying gain so we don't modify the decoder's buffers */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	/* Record where this stream's pushed audio now ends */
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
936
937 void
938 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
939 {
940         shared_ptr<Piece> piece = wp.lock ();
941         shared_ptr<const TextContent> text = wc.lock ();
942         if (!piece || !text) {
943                 return;
944         }
945
946         /* Apply content's subtitle offsets */
947         subtitle.sub.rectangle.x += text->x_offset ();
948         subtitle.sub.rectangle.y += text->y_offset ();
949
950         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
951         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
952         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
953
954         /* Apply content's subtitle scale */
955         subtitle.sub.rectangle.width *= text->x_scale ();
956         subtitle.sub.rectangle.height *= text->y_scale ();
957
958         PlayerText ps;
959         shared_ptr<Image> image = subtitle.sub.image;
960         /* We will scale the subtitle up to fit _video_container_size */
961         dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
962         ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
963         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
964
965         _active_texts[text->type()].add_from (wc, ps, from);
966 }
967
968 void
969 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
970 {
971         shared_ptr<Piece> piece = wp.lock ();
972         shared_ptr<const TextContent> text = wc.lock ();
973         if (!piece || !text) {
974                 return;
975         }
976
977         PlayerText ps;
978         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
979
980         if (from > piece->content->end(_film)) {
981                 return;
982         }
983
984         BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
985                 s.set_h_position (s.h_position() + text->x_offset ());
986                 s.set_v_position (s.v_position() + text->y_offset ());
987                 float const xs = text->x_scale();
988                 float const ys = text->y_scale();
989                 float size = s.size();
990
991                 /* Adjust size to express the common part of the scaling;
992                    e.g. if xs = ys = 0.5 we scale size by 2.
993                 */
994                 if (xs > 1e-5 && ys > 1e-5) {
995                         size *= 1 / min (1 / xs, 1 / ys);
996                 }
997                 s.set_size (size);
998
999                 /* Then express aspect ratio changes */
1000                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1001                         s.set_aspect_adjust (xs / ys);
1002                 }
1003
1004                 s.set_in (dcp::Time(from.seconds(), 1000));
1005                 ps.string.push_back (StringText (s, text->outline_width()));
1006                 ps.add_fonts (text->fonts ());
1007         }
1008
1009         _active_texts[text->type()].add_from (wc, ps, from);
1010 }
1011
1012 void
1013 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1014 {
1015         shared_ptr<const TextContent> text = wc.lock ();
1016         if (!text) {
1017                 return;
1018         }
1019
1020         if (!_active_texts[text->type()].have(wc)) {
1021                 return;
1022         }
1023
1024         shared_ptr<Piece> piece = wp.lock ();
1025         if (!piece) {
1026                 return;
1027         }
1028
1029         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1030
1031         if (dcp_to > piece->content->end(_film)) {
1032                 return;
1033         }
1034
1035         pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1036
1037         bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1038         if (text->use() && !always && !text->burn()) {
1039                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1040         }
1041 }
1042
void
Player::seek (DCPTime time, bool accurate)
{
	/* Seek the player to `time'.  If `accurate' is true, the next frames emitted
	   will start exactly at `time'; otherwise they may start a little before it.
	*/

	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	if (_shuffler) {
		_shuffler->clear ();
	}

	/* Throw away video frames which are queued awaiting emission */
	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	/* Discard pending merged audio and any active subtitles */
	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();
	}

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* We know exactly what the next emission times will be */
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	} else {
		/* We can't be sure when the next frames will arrive */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget the frames we last emitted; they are no good for gap-filling after a seek */
	_last_video.clear ();
}
1102
1103 void
1104 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1105 {
1106         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1107            player before the video that requires them.
1108         */
1109         _delay.push_back (make_pair (pv, time));
1110
1111         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1112                 _last_video_time = time + one_video_frame();
1113         }
1114         _last_video_eyes = increment_eyes (pv->eyes());
1115
1116         if (_delay.size() < 3) {
1117                 return;
1118         }
1119
1120         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1121         _delay.pop_front();
1122         do_emit_video (to_do.first, to_do.second);
1123 }
1124
1125 void
1126 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1127 {
1128         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1129                 for (int i = 0; i < TEXT_COUNT; ++i) {
1130                         _active_texts[i].clear_before (time);
1131                 }
1132         }
1133
1134         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1135         if (subtitles) {
1136                 pv->set_text (subtitles.get ());
1137         }
1138
1139         Video (pv, time);
1140 }
1141
1142 void
1143 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1144 {
1145         /* Log if the assert below is about to fail */
1146         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1147                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1148         }
1149
1150         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1151         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1152         Audio (data, time, _film->audio_frame_rate());
1153         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1154 }
1155
1156 void
1157 Player::fill_audio (DCPTimePeriod period)
1158 {
1159         if (period.from == period.to) {
1160                 return;
1161         }
1162
1163         DCPOMATIC_ASSERT (period.from < period.to);
1164
1165         DCPTime t = period.from;
1166         while (t < period.to) {
1167                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1168                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1169                 if (samples) {
1170                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1171                         silence->make_silent ();
1172                         emit_audio (silence, t);
1173                 }
1174                 t += block;
1175         }
1176 }
1177
DCPTime
Player::one_video_frame () const
{
	/* The length of one video frame at the film's video frame rate */
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}
1183
1184 pair<shared_ptr<AudioBuffers>, DCPTime>
1185 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1186 {
1187         DCPTime const discard_time = discard_to - time;
1188         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1189         Frame remaining_frames = audio->frames() - discard_frames;
1190         if (remaining_frames <= 0) {
1191                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1192         }
1193         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1194         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1195         return make_pair(cut, time + discard_time);
1196 }
1197
1198 void
1199 Player::set_dcp_decode_reduction (optional<int> reduction)
1200 {
1201         Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1202
1203         {
1204                 boost::mutex::scoped_lock lm (_mutex);
1205
1206                 if (reduction == _dcp_decode_reduction) {
1207                         lm.unlock ();
1208                         Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1209                         return;
1210                 }
1211
1212                 _dcp_decode_reduction = reduction;
1213                 setup_pieces_unlocked ();
1214         }
1215
1216         Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1217 }
1218
1219 optional<DCPTime>
1220 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1221 {
1222         boost::mutex::scoped_lock lm (_mutex);
1223
1224         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1225                 if (i->content == content) {
1226                         return content_time_to_dcp (i, t);
1227                 }
1228         }
1229
1230         /* We couldn't find this content; perhaps things are being changed over */
1231         return optional<DCPTime>();
1232 }