Put Time types in dcpomatic namespace.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_text.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
50 #include "shuffler.h"
51 #include <dcp/reel.h>
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
57 #include <stdint.h>
58 #include <algorithm>
59 #include <iostream>
60
61 #include "i18n.h"
62
63 using std::list;
64 using std::cout;
65 using std::min;
66 using std::max;
67 using std::min;
68 using std::vector;
69 using std::pair;
70 using std::map;
71 using std::make_pair;
72 using std::copy;
73 using boost::shared_ptr;
74 using boost::weak_ptr;
75 using boost::dynamic_pointer_cast;
76 using boost::optional;
77 using boost::scoped_ptr;
78 using namespace dcpomatic;
79
/* Identifiers for the Player properties which our Change signal can report on */
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
85
/** Construct a Player for a playlist, using settings (frame rate, size,
 *  audio processor etc.) from the given film.
 *  @param film Film to take settings from.
 *  @param playlist Playlist to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _suspended (false)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor (if any) as if it had just been set */
	film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);

	/* Build Pieces for the playlist's content, then seek to the very start */
	setup_pieces ();
	seek (DCPTime (), true);
}
112
Player::~Player ()
{
	/* _shuffler is a raw pointer owned by this Player (allocated in setup_pieces_unlocked) */
	delete _shuffler;
}
117
/** Take _mutex and rebuild our list of Pieces from the playlist */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
124
125 bool
126 have_video (shared_ptr<Piece> piece)
127 {
128         return piece->decoder && piece->decoder->video;
129 }
130
131 bool
132 have_audio (shared_ptr<Piece> piece)
133 {
134         return piece->decoder && piece->decoder->audio;
135 }
136
/** Rebuild _pieces from the playlist's content, wiring each decoder's output
 *  signals to our handlers.  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_pieces.clear ();

	/* Replace the Shuffler; it re-orders 3D left/right video which arrives out of sequence */
	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

		if (!i->paths_valid ()) {
			/* This content's files are missing, so skip it */
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast);
		FrameRateChange frc (_film, i);

		if (!decoder) {
			/* Not something that we can decode; e.g. Atmos content */
			continue;
		}

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
				i->set_ignore (true);
			}
		}

		/* DCP content may reference (rather than re-encode) its assets */
		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Hook up each text decoder's start/stop signals to our subtitle handlers */
		list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}
	}

	/* Record each audio stream's start position so we know when to expect data from it */
	_stream_states.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Track the periods which have no video / no audio, so pass() can fill them with black / silence */
	_black = Empty (_film, _pieces, bind(&have_video, _1));
	_silent = Empty (_film, _pieces, bind(&have_audio, _1));

	/* Reset our emission positions to the start */
	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();
}
236
237 void
238 Player::playlist_content_change (ChangeType type, int property, bool frequent)
239 {
240         if (type == CHANGE_TYPE_PENDING) {
241                 boost::mutex::scoped_lock lm (_mutex);
242                 /* The player content is probably about to change, so we can't carry on
243                    until that has happened and we've rebuilt our pieces.  Stop pass()
244                    and seek() from working until then.
245                 */
246                 _suspended = true;
247         } else if (type == CHANGE_TYPE_DONE) {
248                 /* A change in our content has gone through.  Re-build our pieces. */
249                 setup_pieces ();
250                 _suspended = false;
251         } else if (type == CHANGE_TYPE_CANCELLED) {
252                 boost::mutex::scoped_lock lm (_mutex);
253                 _suspended = false;
254         }
255
256         Change (type, property, frequent);
257 }
258
/** Set the size of the container that output video will be put into,
 *  emitting Change (PENDING then DONE, or CANCELLED if nothing changed)
 *  for VIDEO_CONTAINER_SIZE.
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No change; release the lock before emitting so observers may call back into us */
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Re-make the black frame used to fill video gaps, at the new size */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
281
282 void
283 Player::playlist_change (ChangeType type)
284 {
285         if (type == CHANGE_TYPE_DONE) {
286                 setup_pieces ();
287         }
288         Change (type, PlayerProperty::PLAYLIST, false);
289 }
290
291 void
292 Player::film_change (ChangeType type, Film::Property p)
293 {
294         /* Here we should notice Film properties that affect our output, and
295            alert listeners that our output now would be different to how it was
296            last time we were run.
297         */
298
299         if (p == Film::CONTAINER) {
300                 Change (type, PlayerProperty::FILM_CONTAINER, false);
301         } else if (p == Film::VIDEO_FRAME_RATE) {
302                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
303                    so we need new pieces here.
304                 */
305                 if (type == CHANGE_TYPE_DONE) {
306                         setup_pieces ();
307                 }
308                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
309         } else if (p == Film::AUDIO_PROCESSOR) {
310                 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
311                         boost::mutex::scoped_lock lm (_mutex);
312                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
313                 }
314         } else if (p == Film::AUDIO_CHANNELS) {
315                 if (type == CHANGE_TYPE_DONE) {
316                         boost::mutex::scoped_lock lm (_mutex);
317                         _audio_merger.clear ();
318                 }
319         }
320 }
321
322 shared_ptr<PlayerVideo>
323 Player::black_player_video_frame (Eyes eyes) const
324 {
325         return shared_ptr<PlayerVideo> (
326                 new PlayerVideo (
327                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
328                         Crop (),
329                         optional<double> (),
330                         _video_container_size,
331                         _video_container_size,
332                         eyes,
333                         PART_WHOLE,
334                         PresetColourConversion::all().front().conversion,
335                         boost::weak_ptr<Content>(),
336                         boost::optional<Frame>()
337                 )
338         );
339 }
340
/** Convert a time in the DCP to a frame index within a piece's content video.
 *  @param piece Piece in question.
 *  @param t Time relative to the start of the DCP.
 *  @return Frame index in the piece's content video.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
357
358 DCPTime
359 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
360 {
361         /* See comment in dcp_to_content_video */
362         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
363         return d + piece->content->position();
364 }
365
366 Frame
367 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
368 {
369         DCPTime s = t - piece->content->position ();
370         s = min (piece->content->length_after_trim(_film), s);
371         /* See notes in dcp_to_content_video */
372         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
373 }
374
375 DCPTime
376 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
377 {
378         /* See comment in dcp_to_content_video */
379         return DCPTime::from_frames (f, _film->audio_frame_rate())
380                 - DCPTime (piece->content->trim_start(), piece->frc)
381                 + piece->content->position();
382 }
383
384 ContentTime
385 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
386 {
387         DCPTime s = t - piece->content->position ();
388         s = min (piece->content->length_after_trim(_film), s);
389         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
390 }
391
392 DCPTime
393 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
394 {
395         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
396 }
397
398 list<shared_ptr<Font> >
399 Player::get_subtitle_fonts ()
400 {
401         boost::mutex::scoped_lock lm (_mutex);
402
403         list<shared_ptr<Font> > fonts;
404         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
405                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
406                         /* XXX: things may go wrong if there are duplicate font IDs
407                            with different font files.
408                         */
409                         list<shared_ptr<Font> > f = j->fonts ();
410                         copy (f.begin(), f.end(), back_inserter (fonts));
411                 }
412         }
413
414         return fonts;
415 }
416
417 /** Set this player never to produce any video data */
418 void
419 Player::set_ignore_video ()
420 {
421         boost::mutex::scoped_lock lm (_mutex);
422         _ignore_video = true;
423         setup_pieces_unlocked ();
424 }
425
426 void
427 Player::set_ignore_audio ()
428 {
429         boost::mutex::scoped_lock lm (_mutex);
430         _ignore_audio = true;
431         setup_pieces_unlocked ();
432 }
433
434 void
435 Player::set_ignore_text ()
436 {
437         boost::mutex::scoped_lock lm (_mutex);
438         _ignore_text = true;
439         setup_pieces_unlocked ();
440 }
441
442 /** Set the player to always burn open texts into the image regardless of the content settings */
443 void
444 Player::set_always_burn_open_subtitles ()
445 {
446         boost::mutex::scoped_lock lm (_mutex);
447         _always_burn_open_subtitles = true;
448 }
449
450 /** Sets up the player to be faster, possibly at the expense of quality */
451 void
452 Player::set_fast ()
453 {
454         boost::mutex::scoped_lock lm (_mutex);
455         _fast = true;
456         setup_pieces_unlocked ();
457 }
458
459 void
460 Player::set_play_referenced ()
461 {
462         boost::mutex::scoped_lock lm (_mutex);
463         _play_referenced = true;
464         setup_pieces_unlocked ();
465 }
466
467 static void
468 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
469 {
470         DCPOMATIC_ASSERT (r);
471         r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
472         r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
473         if (r->actual_duration() > 0) {
474                 a.push_back (
475                         ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
476                         );
477         }
478 }
479
/** @return Details of the reel assets within our DCP content which are to be
 *  referenced (rather than re-encoded) in the DCP we are making, each trimmed
 *  and paired with its period in the new DCP.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			/* Only DCP content can be referenced */
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (_film, j, false));
		} catch (...) {
			/* NOTE(review): this returns whatever was gathered so far rather than
			   skipping just this piece of content — confirm that is intended.
			*/
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		/* Trims expressed as frame counts at the content's rate */
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_CLOSED_CAPTION)) {
				BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
550
/** Run one step of the player: make whichever of our decoders (or black/silence
 *  fillers) is furthest behind emit some data, then push out any audio which is
 *  known to be complete.
 *  @return true if all content has been passed, otherwise false.
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		return false;
	}

	if (_playlist->length(_film) == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			/* This piece's decoder has gone past the end of its content */
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What kind of thing we will emit on this pass */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Prefer black/silence if it falls earlier than the earliest content */
	if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		earliest_content->done = earliest_content->decoder->pass ();
		shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		/* Fill a gap in the video with one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			*/
			DCPOMATIC_ASSERT (labs(period.from.get() - _last_audio_time->get()) < 2);
			period.from = *_last_audio_time;
		}
		if (period.duration() > one_video_frame()) {
			/* Emit at most one video frame's worth of silence in a single pass */
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _film->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Everything is finished: flush any out-of-order 3D video from the shuffler
		   and emit any frames we were holding back.
		*/
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
696
/** @return Open subtitles for the frame at the given time, converted to images */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	/* Find subtitles to burn during the one-video-frame period starting at `time' */
	BOOST_FOREACH (
		PlayerText j,
		_active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		BOOST_FOREACH (BitmapText i, j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			/* NOTE(review): scaled_size appears to be unused below — confirm it can be removed */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

			captions.push_back (
				PositionImage (
					i.image,
					/* Position the image by its proportional rectangle within the container */
					Position<int> (
						lrint (_video_container_size.width * i.rectangle.x),
						lrint (_video_container_size.height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty ()) {
			list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty ()) {
		return optional<PositionImage> ();
	}

	/* Combine all the images into one */
	return merge (captions);
}
742
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	/* Handle a frame of video arriving from a piece's decoder: convert its frame index
	   to DCP time, fill any gap between the last video we emitted and this frame
	   (repeating the previous frame if we have one, otherwise emitting black), then
	   emit this frame one or more times depending on the frame rate change.
	*/
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away (e.g. content was removed); drop the frame */
		return;
	}

	FrameRateChange frc (_film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		/* This frame rate change drops every other content frame */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* Previous frame from this piece, if any, to repeat into the gap */
			LastVideoMap::const_iterator last = _last_video.find (wp);
			if (_film->three_d()) {
				/* In 3D we must fill eye-by-eye, stopping at the eye this
				   new frame is for (or LEFT if it is for both eyes).
				*/
				Eyes fill_to_eyes = video.eyes;
				if (fill_to_eyes == EYES_BOTH) {
					fill_to_eyes = EYES_LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = EYES_LEFT;
				}
				DCPTime j = fill_from;
				/* Start from whichever eye we would emit next */
				Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
				if (eyes == EYES_BOTH) {
					eyes = EYES_LEFT;
				}
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						/* Repeat the last frame with the eye we need */
						shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						emit_video (black_player_video_frame(eyes), j);
					}
					if (eyes == EYES_RIGHT) {
						/* RIGHT completes a frame time; advance the clock */
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: fill one whole frame at a time */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(EYES_BOTH), j);
					}
				}
			}
		}
	}

	/* Remember this frame so future gaps from this piece can repeat it */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (_film, video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content,
			video.frame
			)
		);

	/* Emit the frame, repeated as required by the frame rate change, but never
	   past the end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
842
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	/* Handle a block of audio arriving from a piece's decoder: place it in DCP time,
	   trim any part outside the content's period, apply gain, remap channels, run the
	   optional audio processor and push the result into the merger.
	*/
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away; drop the audio */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate(_film));

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		/* Block starts before the content's period: cut off the front */
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* Block straddles the end of the content's period: cut off the tail */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(_film->audio_frame_rate());
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy before applying gain so we don't modify the decoder's buffers */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	/* Record where this stream's pushed audio now ends, for pass()'s bookkeeping */
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
909
void
Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
{
	/* Handle the start of a bitmap subtitle from a decoder: apply the content's
	   offset/scale settings to the subtitle rectangle, scale the image up to the
	   video container size and register the result as an active text.
	*/
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
		return;
	}

	/* Apply content's subtitle offsets */
	subtitle.sub.rectangle.x += text->x_offset ();
	subtitle.sub.rectangle.y += text->y_offset ();

	/* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
	subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
	subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);

	/* Apply content's subtitle scale */
	subtitle.sub.rectangle.width *= text->x_scale ();
	subtitle.sub.rectangle.height *= text->y_scale ();

	PlayerText ps;
	shared_ptr<Image> image = subtitle.sub.image;
	/* We will scale the subtitle up to fit _video_container_size */
	dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
	ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
	DCPTime from (content_time_to_dcp (piece, subtitle.from()));

	/* Subtitle is active from `from' until the matching subtitle_stop() arrives */
	_active_texts[text->type()].add_from (wc, ps, from);
}
940
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	/* Handle the start of a string (plain-text) subtitle from a decoder: apply the
	   content's offset/scale settings to each SubtitleString and register the result
	   as an active text.
	*/
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	if (from > piece->content->end(_film)) {
		/* Starts after the content's period; ignore */
		return;
	}

	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we scale size by 2.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			/* Equivalent to size *= max(xs, ys) */
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* dcp::Time is constructed from seconds and an editable rate (here 1000 = milliseconds) */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	/* Subtitle is active from `from' until the matching subtitle_stop() arrives */
	_active_texts[text->type()].add_from (wc, ps, from);
}
984
void
Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
{
	/* Handle the end of a subtitle: close the corresponding active text and, if the
	   subtitle is being passed through rather than burnt into the image, emit it via
	   the Text signal.
	*/
	shared_ptr<const TextContent> text = wc.lock ();
	if (!text) {
		return;
	}

	if (!_active_texts[text->type()].have(wc)) {
		/* No matching start was registered; nothing to stop */
		return;
	}

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	DCPTime const dcp_to = content_time_to_dcp (piece, to);

	if (dcp_to > piece->content->end(_film)) {
		/* Ends after the content's period; ignore */
		return;
	}

	pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);

	/* Burnt-in subtitles (and open subtitles when we are forced to burn them) are
	   handled by open_subtitles_for_frame(); only emit the Text signal otherwise.
	*/
	bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
	if (text->use() && !always && !text->burn()) {
		Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
	}
}
1015
void
Player::seek (DCPTime time, bool accurate)
{
	/* Seek the player to `time'.  If `accurate' is true the decoders will be asked to
	   seek precisely and we discard anything before `time'; otherwise we accept
	   whatever the decoders give us from roughly the right place.
	*/
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	if (_shuffler) {
		_shuffler->clear ();
	}

	/* Drop any video frames queued for delayed emission */
	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();
	}

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content */
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* An accurate seek means anything before `time' will be discarded when it arrives */
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	} else {
		/* Inaccurate seek: accept whatever comes next */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget remembered frames so gaps aren't filled with pre-seek video */
	_last_video.clear ();
}
1071
1072 void
1073 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1074 {
1075         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1076            player before the video that requires them.
1077         */
1078         _delay.push_back (make_pair (pv, time));
1079
1080         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1081                 _last_video_time = time + one_video_frame();
1082         }
1083         _last_video_eyes = increment_eyes (pv->eyes());
1084
1085         if (_delay.size() < 3) {
1086                 return;
1087         }
1088
1089         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1090         _delay.pop_front();
1091         do_emit_video (to_do.first, to_do.second);
1092 }
1093
1094 void
1095 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1096 {
1097         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1098                 for (int i = 0; i < TEXT_COUNT; ++i) {
1099                         _active_texts[i].clear_before (time);
1100                 }
1101         }
1102
1103         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1104         if (subtitles) {
1105                 pv->set_text (subtitles.get ());
1106         }
1107
1108         Video (pv, time);
1109 }
1110
1111 void
1112 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1113 {
1114         /* Log if the assert below is about to fail */
1115         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1116                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1117         }
1118
1119         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1120         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1121         Audio (data, time, _film->audio_frame_rate());
1122         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1123 }
1124
1125 void
1126 Player::fill_audio (DCPTimePeriod period)
1127 {
1128         if (period.from == period.to) {
1129                 return;
1130         }
1131
1132         DCPOMATIC_ASSERT (period.from < period.to);
1133
1134         DCPTime t = period.from;
1135         while (t < period.to) {
1136                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1137                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1138                 if (samples) {
1139                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1140                         silence->make_silent ();
1141                         emit_audio (silence, t);
1142                 }
1143                 t += block;
1144         }
1145 }
1146
1147 DCPTime
1148 Player::one_video_frame () const
1149 {
1150         return DCPTime::from_frames (1, _film->video_frame_rate ());
1151 }
1152
1153 pair<shared_ptr<AudioBuffers>, DCPTime>
1154 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1155 {
1156         DCPTime const discard_time = discard_to - time;
1157         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1158         Frame remaining_frames = audio->frames() - discard_frames;
1159         if (remaining_frames <= 0) {
1160                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1161         }
1162         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1163         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1164         return make_pair(cut, time + discard_time);
1165 }
1166
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	/* Change the JPEG2000 decode reduction (downscale factor) used for DCP content,
	   rebuilding the pieces.  Emits Change with PENDING, then CANCELLED (if the
	   value is unchanged) or DONE.
	*/
	Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change; unlock before emitting so handlers can call back into us */
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		setup_pieces_unlocked ();
	}

	/* Lock released by scope exit above; emit DONE without holding _mutex */
	Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1187
1188 optional<DCPTime>
1189 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1190 {
1191         boost::mutex::scoped_lock lm (_mutex);
1192
1193         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1194                 if (i->content == content) {
1195                         return content_time_to_dcp (i, t);
1196                 }
1197         }
1198
1199         /* We couldn't find this content; perhaps things are being changed over */
1200         return optional<DCPTime>();
1201 }