Re-work idle handling from previous commit.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_text.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
50 #include "shuffler.h"
51 #include "timer.h"
52 #include <dcp/reel.h>
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <dcp/reel_closed_caption_asset.h>
57 #include <boost/foreach.hpp>
58 #include <stdint.h>
59 #include <algorithm>
60 #include <iostream>
61
62 #include "i18n.h"
63
64 using std::list;
65 using std::cout;
66 using std::min;
67 using std::max;
68 using std::min;
69 using std::vector;
70 using std::pair;
71 using std::map;
72 using std::make_pair;
73 using std::copy;
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79 using namespace dcpomatic;
80
/* Arbitrary, non-overlapping identifiers passed with the Change signal so that
   observers can tell which aspect of the player's output has changed. */
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
86
/** Construct a Player which emits the video/audio/text of @p playlist for @p film.
 *  Subscribes to film and playlist change signals, builds the initial set of
 *  pieces and seeks (accurately) to time zero.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _suspended (false)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	/* at_front keeps our handler ahead of other subscribers (see comment above) */
	_playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pretend the audio processor has just been set up so we clone it now */
	film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}
113
Player::~Player ()
{
	/* _shuffler is a raw owning pointer, (re)allocated in setup_pieces_unlocked() */
	delete _shuffler;
}
118
119 void
120 Player::setup_pieces ()
121 {
122         boost::mutex::scoped_lock lm (_mutex);
123         setup_pieces_unlocked ();
124 }
125
126 bool
127 have_video (shared_ptr<Piece> piece)
128 {
129         return piece->decoder && piece->decoder->video;
130 }
131
132 bool
133 have_audio (shared_ptr<Piece> piece)
134 {
135         return piece->decoder && piece->decoder->audio;
136 }
137
/** Rebuild _pieces (one per decodable playlist content), wire up decoder signals
 *  and reset playback state.  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_pieces.clear ();

	/* Replace the Shuffler; it re-orders 3D L/R frames that arrive out of sequence */
	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

		if (!i->paths_valid ()) {
			/* Skip content whose files are missing */
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast);
		FrameRateChange frc (_film, i);

		if (!decoder) {
			/* Not something that we can decode; e.g. Atmos content */
			continue;
		}

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			/* NOTE(review): this inner `i' shadows the Content loop variable above */
			BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
				i->set_ignore (true);
			}
		}

		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		/* Pieces are handed to the signal handlers as weak_ptr so a dangling
		   connection cannot keep a piece alive. */
		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Route each text decoder's start/stop events to our handlers */
		list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}
	}

	/* Record, per audio stream, which piece it belongs to and where pushing starts */
	_stream_states.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Periods with no video get black; periods with no audio get silence */
	_black = Empty (_film, _pieces, bind(&have_video, _1));
	_silent = Empty (_film, _pieces, bind(&have_audio, _1));

	/* Reset emission state to the start of the timeline */
	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();
}
237
/** Handle a change to some piece of playlist content, suspending playback while
 *  the change is pending and rebuilding pieces once it is done.
 *  @param type      PENDING / DONE / CANCELLED stage of the change.
 *  @param property  Property identifier, forwarded to our own Change signal.
 *  @param frequent  true if this change is expected to happen often (forwarded).
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (type == CHANGE_TYPE_PENDING) {
		boost::mutex::scoped_lock lm (_mutex);
		/* The player content is probably about to change, so we can't carry on
		   until that has happened and we've rebuilt our pieces.  Stop pass()
		   and seek() from working until then.
		*/
		_suspended = true;
	} else if (type == CHANGE_TYPE_DONE) {
		/* A change in our content has gone through.  Re-build our pieces. */
		setup_pieces ();
		/* NOTE(review): unlike the other branches, _suspended is cleared here
		   without _mutex held (setup_pieces takes and releases it internally) —
		   presumably benign for a bool flag, but confirm. */
		_suspended = false;
	} else if (type == CHANGE_TYPE_CANCELLED) {
		boost::mutex::scoped_lock lm (_mutex);
		_suspended = false;
	}

	/* Proxy the change on to our own observers (e.g. the butler) */
	Change (type, property, frequent);
}
259
/** Set the size of the container that output video will be fitted into,
 *  re-making the cached black frame to match.  Emits PENDING, then either
 *  CANCELLED (no-op) or DONE.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* Nothing to do; release the lock before signalling so that
			   handlers may safely call back into the player. */
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Re-make the black filler frame at the new size */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	/* Signalled outside the lock, as above */
	Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
282
283 void
284 Player::playlist_change (ChangeType type)
285 {
286         if (type == CHANGE_TYPE_DONE) {
287                 setup_pieces ();
288         }
289         Change (type, PlayerProperty::PLAYLIST, false);
290 }
291
292 void
293 Player::film_change (ChangeType type, Film::Property p)
294 {
295         /* Here we should notice Film properties that affect our output, and
296            alert listeners that our output now would be different to how it was
297            last time we were run.
298         */
299
300         if (p == Film::CONTAINER) {
301                 Change (type, PlayerProperty::FILM_CONTAINER, false);
302         } else if (p == Film::VIDEO_FRAME_RATE) {
303                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
304                    so we need new pieces here.
305                 */
306                 if (type == CHANGE_TYPE_DONE) {
307                         setup_pieces ();
308                 }
309                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
310         } else if (p == Film::AUDIO_PROCESSOR) {
311                 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
312                         boost::mutex::scoped_lock lm (_mutex);
313                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
314                 }
315         } else if (p == Film::AUDIO_CHANNELS) {
316                 if (type == CHANGE_TYPE_DONE) {
317                         boost::mutex::scoped_lock lm (_mutex);
318                         _audio_merger.clear ();
319                 }
320         }
321 }
322
323 shared_ptr<PlayerVideo>
324 Player::black_player_video_frame (Eyes eyes) const
325 {
326         return shared_ptr<PlayerVideo> (
327                 new PlayerVideo (
328                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
329                         Crop (),
330                         optional<double> (),
331                         _video_container_size,
332                         _video_container_size,
333                         eyes,
334                         PART_WHOLE,
335                         PresetColourConversion::all().front().conversion,
336                         VIDEO_RANGE_FULL,
337                         boost::weak_ptr<Content>(),
338                         boost::optional<Frame>()
339                 )
340         );
341 }
342
343 Frame
344 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
345 {
346         DCPTime s = t - piece->content->position ();
347         s = min (piece->content->length_after_trim(_film), s);
348         s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
349
350         /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
351            then convert that ContentTime to frames at the content's rate.  However this fails for
352            situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
353            enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
354
355            Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
356         */
357         return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
358 }
359
360 DCPTime
361 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
362 {
363         /* See comment in dcp_to_content_video */
364         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
365         return d + piece->content->position();
366 }
367
368 Frame
369 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
370 {
371         DCPTime s = t - piece->content->position ();
372         s = min (piece->content->length_after_trim(_film), s);
373         /* See notes in dcp_to_content_video */
374         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
375 }
376
377 DCPTime
378 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
379 {
380         /* See comment in dcp_to_content_video */
381         return DCPTime::from_frames (f, _film->audio_frame_rate())
382                 - DCPTime (piece->content->trim_start(), piece->frc)
383                 + piece->content->position();
384 }
385
386 ContentTime
387 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
388 {
389         DCPTime s = t - piece->content->position ();
390         s = min (piece->content->length_after_trim(_film), s);
391         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
392 }
393
394 DCPTime
395 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
396 {
397         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
398 }
399
400 list<shared_ptr<Font> >
401 Player::get_subtitle_fonts ()
402 {
403         boost::mutex::scoped_lock lm (_mutex);
404
405         list<shared_ptr<Font> > fonts;
406         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
407                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
408                         /* XXX: things may go wrong if there are duplicate font IDs
409                            with different font files.
410                         */
411                         list<shared_ptr<Font> > f = j->fonts ();
412                         copy (f.begin(), f.end(), back_inserter (fonts));
413                 }
414         }
415
416         return fonts;
417 }
418
419 /** Set this player never to produce any video data */
420 void
421 Player::set_ignore_video ()
422 {
423         boost::mutex::scoped_lock lm (_mutex);
424         _ignore_video = true;
425         setup_pieces_unlocked ();
426 }
427
428 void
429 Player::set_ignore_audio ()
430 {
431         boost::mutex::scoped_lock lm (_mutex);
432         _ignore_audio = true;
433         setup_pieces_unlocked ();
434 }
435
436 void
437 Player::set_ignore_text ()
438 {
439         boost::mutex::scoped_lock lm (_mutex);
440         _ignore_text = true;
441         setup_pieces_unlocked ();
442 }
443
444 /** Set the player to always burn open texts into the image regardless of the content settings */
445 void
446 Player::set_always_burn_open_subtitles ()
447 {
448         boost::mutex::scoped_lock lm (_mutex);
449         _always_burn_open_subtitles = true;
450 }
451
452 /** Sets up the player to be faster, possibly at the expense of quality */
453 void
454 Player::set_fast ()
455 {
456         boost::mutex::scoped_lock lm (_mutex);
457         _fast = true;
458         setup_pieces_unlocked ();
459 }
460
461 void
462 Player::set_play_referenced ()
463 {
464         boost::mutex::scoped_lock lm (_mutex);
465         _play_referenced = true;
466         setup_pieces_unlocked ();
467 }
468
469 static void
470 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
471 {
472         DCPOMATIC_ASSERT (r);
473         r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
474         r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
475         if (r->actual_duration() > 0) {
476                 a.push_back (
477                         ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
478                         );
479         }
480 }
481
/** @return assets from DCP content in the playlist which are marked to be
 *  referenced (rather than re-encoded), each trimmed and positioned on the
 *  film's timeline.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
		/* Only DCP content can be referenced */
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (_film, j, false));
		} catch (...) {
			/* NOTE(review): any failure to open one DCP silently returns the
			   assets gathered so far (a partial list) rather than skipping
			   just this content — presumably deliberate, but confirm. */
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* Work out how much of the content's overall trim falls inside this reel.
			   See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			/* Where this reel starts on the film's timeline */
			DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_CLOSED_CAPTION)) {
				BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
552
/** Ask whichever of our decoders (or black/silence fillers) is farthest behind
 *  to emit some data, then push out any audio which is known to be complete.
 *  @return true when there is nothing left to emit (playback is finished).
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		return false;
	}

	if (_playlist->length(_film) == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		/* Where this piece's decoder currently is, on the film's timeline */
		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What we will do this pass: decode content, emit a black frame, emit
	   silence, or nothing (finished). */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* A black or silent gap that starts before the earliest content takes priority */
	if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		earliest_content->done = earliest_content->decoder->pass ();
		shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			*/
			DCPOMATIC_ASSERT (labs(period.from.get() - _last_audio_time->get()) < 2);
			period.from = *_last_audio_time;
		}
		if (period.duration() > one_video_frame()) {
			/* Emit at most one video frame's worth of silence per pass */
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _film->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Flush any 3D frames held by the shuffler, then any delayed video */
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
698
699 /** @return Open subtitles for the frame at the given time, converted to images */
700 optional<PositionImage>
701 Player::open_subtitles_for_frame (DCPTime time) const
702 {
703         list<PositionImage> captions;
704         int const vfr = _film->video_frame_rate();
705
706         BOOST_FOREACH (
707                 PlayerText j,
708                 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
709                 ) {
710
711                 /* Bitmap subtitles */
712                 BOOST_FOREACH (BitmapText i, j.bitmap) {
713                         if (!i.image) {
714                                 continue;
715                         }
716
717                         /* i.image will already have been scaled to fit _video_container_size */
718                         dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
719
720                         captions.push_back (
721                                 PositionImage (
722                                         i.image,
723                                         Position<int> (
724                                                 lrint (_video_container_size.width * i.rectangle.x),
725                                                 lrint (_video_container_size.height * i.rectangle.y)
726                                                 )
727                                         )
728                                 );
729                 }
730
731                 /* String subtitles (rendered to an image) */
732                 if (!j.string.empty ()) {
733                         list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
734                         copy (s.begin(), s.end(), back_inserter (captions));
735                 }
736         }
737
738         if (captions.empty ()) {
739                 return optional<PositionImage> ();
740         }
741
742         return merge (captions);
743 }
744
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	/* Handle a frame of video arriving from a piece's decoder: fill any gap
	   since the last emitted frame (with repeats of the previous frame, or
	   black), then emit this frame, repeating it if the frame-rate change
	   requires it.
	*/
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away since this video was decoded */
		return;
	}

	FrameRateChange frc (_film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		/* The rate change wants alternate frames dropped, so discard odd ones */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* Last frame we emitted for this piece, if any; used as the filler */
			LastVideoMap::const_iterator last = _last_video.find (wp);
			if (_film->three_d()) {
				/* In 3D we must fill whole left/right pairs, so track which
				   eye we are filling up to as well as the time.
				*/
				Eyes fill_to_eyes = video.eyes;
				if (fill_to_eyes == EYES_BOTH) {
					fill_to_eyes = EYES_LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = EYES_LEFT;
				}
				DCPTime j = fill_from;
				Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
				if (eyes == EYES_BOTH) {
					eyes = EYES_LEFT;
				}
				/* Emit alternating eyes, advancing time after each RIGHT,
				   until we reach both the target time and the target eye.
				*/
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						/* Repeat the last frame, re-labelled with the eye we need */
						shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						/* No previous frame to repeat; use black */
						emit_video (black_player_video_frame(eyes), j);
					}
					if (eyes == EYES_RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: simply repeat the last frame (or black) up to fill_to */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(EYES_BOTH), j);
					}
				}
			}
		}
	}

	/* Remember this frame so that it can be repeated to fill future gaps */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (_film, video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content->video->range(),
			piece->content,
			video.frame
			)
		);

	/* Emit the frame, repeated as the frame-rate change requires, but never
	   beyond the end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
845
846 void
847 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
848 {
849         DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
850
851         shared_ptr<Piece> piece = wp.lock ();
852         if (!piece) {
853                 return;
854         }
855
856         shared_ptr<AudioContent> content = piece->content->audio;
857         DCPOMATIC_ASSERT (content);
858
859         int const rfr = content->resampled_frame_rate (_film);
860
861         /* Compute time in the DCP */
862         DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
863         /* And the end of this block in the DCP */
864         DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);
865
866         /* Remove anything that comes before the start or after the end of the content */
867         if (time < piece->content->position()) {
868                 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
869                 if (!cut.first) {
870                         /* This audio is entirely discarded */
871                         return;
872                 }
873                 content_audio.audio = cut.first;
874                 time = cut.second;
875         } else if (time > piece->content->end(_film)) {
876                 /* Discard it all */
877                 return;
878         } else if (end > piece->content->end(_film)) {
879                 Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
880                 if (remaining_frames == 0) {
881                         return;
882                 }
883                 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
884                 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
885                 content_audio.audio = cut;
886         }
887
888         DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
889
890         /* Gain */
891
892         if (content->gain() != 0) {
893                 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
894                 gain->apply_gain (content->gain ());
895                 content_audio.audio = gain;
896         }
897
898         /* Remap */
899
900         content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
901
902         /* Process */
903
904         if (_audio_processor) {
905                 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
906         }
907
908         /* Push */
909
910         _audio_merger.push (content_audio.audio, time);
911         DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
912         _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
913 }
914
915 void
916 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
917 {
918         shared_ptr<Piece> piece = wp.lock ();
919         shared_ptr<const TextContent> text = wc.lock ();
920         if (!piece || !text) {
921                 return;
922         }
923
924         /* Apply content's subtitle offsets */
925         subtitle.sub.rectangle.x += text->x_offset ();
926         subtitle.sub.rectangle.y += text->y_offset ();
927
928         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
929         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
930         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
931
932         /* Apply content's subtitle scale */
933         subtitle.sub.rectangle.width *= text->x_scale ();
934         subtitle.sub.rectangle.height *= text->y_scale ();
935
936         PlayerText ps;
937         shared_ptr<Image> image = subtitle.sub.image;
938         /* We will scale the subtitle up to fit _video_container_size */
939         dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
940         ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
941         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
942
943         _active_texts[text->type()].add_from (wc, ps, from);
944 }
945
946 void
947 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
948 {
949         shared_ptr<Piece> piece = wp.lock ();
950         shared_ptr<const TextContent> text = wc.lock ();
951         if (!piece || !text) {
952                 return;
953         }
954
955         PlayerText ps;
956         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
957
958         if (from > piece->content->end(_film)) {
959                 return;
960         }
961
962         BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
963                 s.set_h_position (s.h_position() + text->x_offset ());
964                 s.set_v_position (s.v_position() + text->y_offset ());
965                 float const xs = text->x_scale();
966                 float const ys = text->y_scale();
967                 float size = s.size();
968
969                 /* Adjust size to express the common part of the scaling;
970                    e.g. if xs = ys = 0.5 we scale size by 2.
971                 */
972                 if (xs > 1e-5 && ys > 1e-5) {
973                         size *= 1 / min (1 / xs, 1 / ys);
974                 }
975                 s.set_size (size);
976
977                 /* Then express aspect ratio changes */
978                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
979                         s.set_aspect_adjust (xs / ys);
980                 }
981
982                 s.set_in (dcp::Time(from.seconds(), 1000));
983                 ps.string.push_back (StringText (s, text->outline_width()));
984                 ps.add_fonts (text->fonts ());
985         }
986
987         _active_texts[text->type()].add_from (wc, ps, from);
988 }
989
990 void
991 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
992 {
993         shared_ptr<const TextContent> text = wc.lock ();
994         if (!text) {
995                 return;
996         }
997
998         if (!_active_texts[text->type()].have(wc)) {
999                 return;
1000         }
1001
1002         shared_ptr<Piece> piece = wp.lock ();
1003         if (!piece) {
1004                 return;
1005         }
1006
1007         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1008
1009         if (dcp_to > piece->content->end(_film)) {
1010                 return;
1011         }
1012
1013         pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1014
1015         bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1016         if (text->use() && !always && !text->burn()) {
1017                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1018         }
1019 }
1020
void
Player::seek (DCPTime time, bool accurate)
{
	/* Seek the whole player to `time'.  If `accurate' is true the first
	   video/audio emitted after the seek will be exactly at `time';
	   otherwise it may be a little before.
	*/
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	/* Throw away any buffered state from before the seek */
	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();
	}

	/* Seek each piece's decoder as appropriate for its position relative to `time' */
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content */
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* An accurate seek means nothing before `time' may be emitted */
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	} else {
		/* Inaccurate: we don't know where the next emission will come from */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget remembered frames; they are no longer valid for gap-filling */
	_last_video.clear ();
}
1076
1077 void
1078 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1079 {
1080         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1081            player before the video that requires them.
1082         */
1083         _delay.push_back (make_pair (pv, time));
1084
1085         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1086                 _last_video_time = time + one_video_frame();
1087         }
1088         _last_video_eyes = increment_eyes (pv->eyes());
1089
1090         if (_delay.size() < 3) {
1091                 return;
1092         }
1093
1094         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1095         _delay.pop_front();
1096         do_emit_video (to_do.first, to_do.second);
1097 }
1098
1099 void
1100 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1101 {
1102         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1103                 for (int i = 0; i < TEXT_COUNT; ++i) {
1104                         _active_texts[i].clear_before (time);
1105                 }
1106         }
1107
1108         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1109         if (subtitles) {
1110                 pv->set_text (subtitles.get ());
1111         }
1112
1113         Video (pv, time);
1114 }
1115
1116 void
1117 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1118 {
1119         /* Log if the assert below is about to fail */
1120         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1121                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1122         }
1123
1124         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1125         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1126         Audio (data, time, _film->audio_frame_rate());
1127         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1128 }
1129
1130 void
1131 Player::fill_audio (DCPTimePeriod period)
1132 {
1133         if (period.from == period.to) {
1134                 return;
1135         }
1136
1137         DCPOMATIC_ASSERT (period.from < period.to);
1138
1139         DCPTime t = period.from;
1140         while (t < period.to) {
1141                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1142                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1143                 if (samples) {
1144                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1145                         silence->make_silent ();
1146                         emit_audio (silence, t);
1147                 }
1148                 t += block;
1149         }
1150 }
1151
1152 DCPTime
1153 Player::one_video_frame () const
1154 {
1155         return DCPTime::from_frames (1, _film->video_frame_rate ());
1156 }
1157
1158 pair<shared_ptr<AudioBuffers>, DCPTime>
1159 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1160 {
1161         DCPTime const discard_time = discard_to - time;
1162         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1163         Frame remaining_frames = audio->frames() - discard_frames;
1164         if (remaining_frames <= 0) {
1165                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1166         }
1167         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1168         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1169         return make_pair(cut, time + discard_time);
1170 }
1171
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	/* Set the decode-resolution reduction applied to DCP content, announcing
	   the change to observers via Change (PENDING, then DONE or CANCELLED).
	*/
	Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change; unlock before emitting so that observers may
			   safely call back into the Player.
			*/
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		/* Rebuild pieces so that decoders pick up the new reduction */
		setup_pieces_unlocked ();
	}

	Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1192
1193 optional<DCPTime>
1194 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1195 {
1196         boost::mutex::scoped_lock lm (_mutex);
1197
1198         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1199                 if (i->content == content) {
1200                         return content_time_to_dcp (i, t);
1201                 }
1202         }
1203
1204         /* We couldn't find this content; perhaps things are being changed over */
1205         return optional<DCPTime>();
1206 }