6f0cfd1af7e9490f216b2d243742b86d081badb5
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_text.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
50 #include "shuffler.h"
51 #include <dcp/reel.h>
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
57 #include <stdint.h>
58 #include <algorithm>
59 #include <iostream>
60
61 #include "i18n.h"
62
63 using std::list;
64 using std::cout;
65 using std::min;
66 using std::max;
67 using std::min;
68 using std::vector;
69 using std::pair;
70 using std::map;
71 using std::make_pair;
72 using std::copy;
73 using boost::shared_ptr;
74 using boost::weak_ptr;
75 using boost::dynamic_pointer_cast;
76 using boost::optional;
77 using boost::scoped_ptr;
78 using namespace dcpomatic;
79
/* Codes identifying Player properties in Change signals; presumably chosen in the
   700 range so as not to clash with other property codes — confirm against player.h.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
85
/** Construct a Player for a given film/playlist: connect to change signals,
 *  set up the container size and audio processor, build the pieces and seek
 *  to the start so that pass() can be called immediately.
 *  @param film Film to play.
 *  @param playlist Playlist to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _suspended (false)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor (if any) as if it had just been changed */
	film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}
112
Player::~Player ()
{
	/* _shuffler is a raw owning pointer, allocated in setup_pieces_unlocked() */
	delete _shuffler;
}
117
118 void
119 Player::setup_pieces ()
120 {
121         boost::mutex::scoped_lock lm (_mutex);
122         setup_pieces_unlocked ();
123 }
124
125 bool
126 have_video (shared_ptr<Piece> piece)
127 {
128         return piece->decoder && piece->decoder->video;
129 }
130
131 bool
132 have_audio (shared_ptr<Piece> piece)
133 {
134         return piece->decoder && piece->decoder->audio;
135 }
136
137 void
138 Player::setup_pieces_unlocked ()
139 {
140         _pieces.clear ();
141
142         delete _shuffler;
143         _shuffler = new Shuffler();
144         _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
145
146         BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
147
148                 if (!i->paths_valid ()) {
149                         continue;
150                 }
151
152                 if (_ignore_video && _ignore_audio && i->text.empty()) {
153                         /* We're only interested in text and this content has none */
154                         continue;
155                 }
156
157                 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast);
158                 FrameRateChange frc (_film, i);
159
160                 if (!decoder) {
161                         /* Not something that we can decode; e.g. Atmos content */
162                         continue;
163                 }
164
165                 if (decoder->video && _ignore_video) {
166                         decoder->video->set_ignore (true);
167                 }
168
169                 if (decoder->audio && _ignore_audio) {
170                         decoder->audio->set_ignore (true);
171                 }
172
173                 if (_ignore_text) {
174                         BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
175                                 i->set_ignore (true);
176                         }
177                 }
178
179                 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
180                 if (dcp) {
181                         dcp->set_decode_referenced (_play_referenced);
182                         if (_play_referenced) {
183                                 dcp->set_forced_reduction (_dcp_decode_reduction);
184                         }
185                 }
186
187                 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
188                 _pieces.push_back (piece);
189
190                 if (decoder->video) {
191                         if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
192                                 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
193                                 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
194                         } else {
195                                 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
196                         }
197                 }
198
199                 if (decoder->audio) {
200                         decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
201                 }
202
203                 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
204
205                 while (j != decoder->text.end()) {
206                         (*j)->BitmapStart.connect (
207                                 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
208                                 );
209                         (*j)->PlainStart.connect (
210                                 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
211                                 );
212                         (*j)->Stop.connect (
213                                 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
214                                 );
215
216                         ++j;
217                 }
218         }
219
220         _stream_states.clear ();
221         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
222                 if (i->content->audio) {
223                         BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
224                                 _stream_states[j] = StreamState (i, i->content->position ());
225                         }
226                 }
227         }
228
229         _black = Empty (_film, _pieces, bind(&have_video, _1));
230         _silent = Empty (_film, _pieces, bind(&have_audio, _1));
231
232         _last_video_time = DCPTime ();
233         _last_video_eyes = EYES_BOTH;
234         _last_audio_time = DCPTime ();
235 }
236
237 void
238 Player::playlist_content_change (ChangeType type, int property, bool frequent)
239 {
240         if (type == CHANGE_TYPE_PENDING) {
241                 boost::mutex::scoped_lock lm (_mutex);
242                 /* The player content is probably about to change, so we can't carry on
243                    until that has happened and we've rebuilt our pieces.  Stop pass()
244                    and seek() from working until then.
245                 */
246                 _suspended = true;
247         } else if (type == CHANGE_TYPE_DONE) {
248                 /* A change in our content has gone through.  Re-build our pieces. */
249                 setup_pieces ();
250                 _suspended = false;
251         } else if (type == CHANGE_TYPE_CANCELLED) {
252                 boost::mutex::scoped_lock lm (_mutex);
253                 _suspended = false;
254         }
255
256         Change (type, property, frequent);
257 }
258
/** Set the size of the container into which video will be scaled, rebuilding
 *  the cached black frame to match.  Emits Change (pending, then done or
 *  cancelled) around the update.
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No change: release the lock before signalling, so listeners are
			   not called with _mutex held.
			*/
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Pre-render a black frame at the new size for use where there is no video */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	/* The scope above has released the lock before this emission */
	Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
281
282 void
283 Player::playlist_change (ChangeType type)
284 {
285         if (type == CHANGE_TYPE_DONE) {
286                 setup_pieces ();
287         }
288         Change (type, PlayerProperty::PLAYLIST, false);
289 }
290
291 void
292 Player::film_change (ChangeType type, Film::Property p)
293 {
294         /* Here we should notice Film properties that affect our output, and
295            alert listeners that our output now would be different to how it was
296            last time we were run.
297         */
298
299         if (p == Film::CONTAINER) {
300                 Change (type, PlayerProperty::FILM_CONTAINER, false);
301         } else if (p == Film::VIDEO_FRAME_RATE) {
302                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
303                    so we need new pieces here.
304                 */
305                 if (type == CHANGE_TYPE_DONE) {
306                         setup_pieces ();
307                 }
308                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
309         } else if (p == Film::AUDIO_PROCESSOR) {
310                 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
311                         boost::mutex::scoped_lock lm (_mutex);
312                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
313                 }
314         } else if (p == Film::AUDIO_CHANNELS) {
315                 if (type == CHANGE_TYPE_DONE) {
316                         boost::mutex::scoped_lock lm (_mutex);
317                         _audio_merger.clear ();
318                 }
319         }
320 }
321
/** @param eyes Eye(s) that the frame is for.
 *  @return A frame of black (from the cached _black_image, which set_video_container_size
 *  keeps at the current container size) wrapped up as a PlayerVideo.
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return shared_ptr<PlayerVideo> (
		new PlayerVideo (
			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
			Crop (),
			optional<double> (),
			_video_container_size,
			_video_container_size,
			eyes,
			PART_WHOLE,
			PresetColourConversion::all().front().conversion,
			VIDEO_RANGE_FULL,
			/* No content or content frame is associated with this black frame */
			boost::weak_ptr<Content>(),
			boost::optional<Frame>()
		)
	);
}
341
/** Convert a position on the DCP timeline to a video frame index within a piece of content.
 *  @param piece Piece in question.
 *  @param t Position on the DCP timeline.
 *  @return Frame index into the content.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	/* Offset from the start of this content, clamped to its trimmed length,
	   shifted by the start trim and clamped at zero.
	*/
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
358
359 DCPTime
360 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
361 {
362         /* See comment in dcp_to_content_video */
363         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
364         return d + piece->content->position();
365 }
366
367 Frame
368 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
369 {
370         DCPTime s = t - piece->content->position ();
371         s = min (piece->content->length_after_trim(_film), s);
372         /* See notes in dcp_to_content_video */
373         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
374 }
375
376 DCPTime
377 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
378 {
379         /* See comment in dcp_to_content_video */
380         return DCPTime::from_frames (f, _film->audio_frame_rate())
381                 - DCPTime (piece->content->trim_start(), piece->frc)
382                 + piece->content->position();
383 }
384
/** Convert a DCP timeline position to a ContentTime within a piece of content */
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime const clamped = min (piece->content->length_after_trim(_film), t - piece->content->position());
	return max (ContentTime(), ContentTime (clamped, piece->frc) + piece->content->trim_start());
}
392
/** Convert a ContentTime within a piece of content to a DCP timeline position */
DCPTime
Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
{
	DCPTime const on_timeline = DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position();
	return max (DCPTime(), on_timeline);
}
398
399 list<shared_ptr<Font> >
400 Player::get_subtitle_fonts ()
401 {
402         boost::mutex::scoped_lock lm (_mutex);
403
404         list<shared_ptr<Font> > fonts;
405         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
406                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
407                         /* XXX: things may go wrong if there are duplicate font IDs
408                            with different font files.
409                         */
410                         list<shared_ptr<Font> > f = j->fonts ();
411                         copy (f.begin(), f.end(), back_inserter (fonts));
412                 }
413         }
414
415         return fonts;
416 }
417
418 /** Set this player never to produce any video data */
419 void
420 Player::set_ignore_video ()
421 {
422         boost::mutex::scoped_lock lm (_mutex);
423         _ignore_video = true;
424         setup_pieces_unlocked ();
425 }
426
427 void
428 Player::set_ignore_audio ()
429 {
430         boost::mutex::scoped_lock lm (_mutex);
431         _ignore_audio = true;
432         setup_pieces_unlocked ();
433 }
434
435 void
436 Player::set_ignore_text ()
437 {
438         boost::mutex::scoped_lock lm (_mutex);
439         _ignore_text = true;
440         setup_pieces_unlocked ();
441 }
442
443 /** Set the player to always burn open texts into the image regardless of the content settings */
444 void
445 Player::set_always_burn_open_subtitles ()
446 {
447         boost::mutex::scoped_lock lm (_mutex);
448         _always_burn_open_subtitles = true;
449 }
450
451 /** Sets up the player to be faster, possibly at the expense of quality */
452 void
453 Player::set_fast ()
454 {
455         boost::mutex::scoped_lock lm (_mutex);
456         _fast = true;
457         setup_pieces_unlocked ();
458 }
459
460 void
461 Player::set_play_referenced ()
462 {
463         boost::mutex::scoped_lock lm (_mutex);
464         _play_referenced = true;
465         setup_pieces_unlocked ();
466 }
467
/** Apply per-reel trims to an asset and, if any duration remains, add it to a list.
 *  Note that this mutates @a r: its entry point and duration are adjusted in place.
 *  @param a List to add the asset to.
 *  @param r Asset; must not be null (asserted).
 *  @param reel_trim_start Frames to trim from the start of this reel.
 *  @param reel_trim_end Frames to trim from the end of this reel.
 *  @param from Position of the (trimmed) asset on the DCP timeline.
 *  @param ffr Film video frame rate.
 */
static void
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
{
	DCPOMATIC_ASSERT (r);
	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
	if (r->actual_duration() > 0) {
		a.push_back (
			ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
			);
	}
}
480
/** @return Reel assets from the playlist's DCP content which are marked to be
 *  referenced by (rather than re-encoded into) the output DCP, with trims applied.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			/* Only DCP content can be referenced */
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (_film, j, false));
		} catch (...) {
			/* NOTE(review): failure to open one DCP silently returns whatever
			   has been collected so far rather than skipping it or reporting;
			   confirm this is intentional.
			*/
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		/* Trims are expressed in frames at the content's own rate */
		double const cfr = j->video_frame_rate().get();
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* Work out how much of the start/end trim falls within this reel.
			   See doc/design/trim_reels.svg
			*/
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_CLOSED_CAPTION)) {
				BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
551
/** Run one "pass": ask whichever of our sources (a piece of content, black
 *  filler or silent filler) is farthest behind to emit some data, then emit
 *  any audio that is now complete.
 *  @return true when there is nothing left to do (or the film is empty),
 *  false if pass() should be called again (including while suspended).
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		return false;
	}

	if (_playlist->length(_film) == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		/* Where this piece's decoder currently is, on the DCP timeline */
		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* Decide what to emit next: real content, or black/silent filler,
	   whichever is earliest.
	*/
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		earliest_content->done = earliest_content->decoder->pass ();
		shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		/* Emit one black frame and advance the black-filler position */
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			*/
			DCPOMATIC_ASSERT (labs(period.from.get() - _last_audio_time->get()) < 2);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _film->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Everything has been emitted; flush out the shuffler and any delayed video */
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
697
698 /** @return Open subtitles for the frame at the given time, converted to images */
699 optional<PositionImage>
700 Player::open_subtitles_for_frame (DCPTime time) const
701 {
702         list<PositionImage> captions;
703         int const vfr = _film->video_frame_rate();
704
705         BOOST_FOREACH (
706                 PlayerText j,
707                 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
708                 ) {
709
710                 /* Bitmap subtitles */
711                 BOOST_FOREACH (BitmapText i, j.bitmap) {
712                         if (!i.image) {
713                                 continue;
714                         }
715
716                         /* i.image will already have been scaled to fit _video_container_size */
717                         dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
718
719                         captions.push_back (
720                                 PositionImage (
721                                         i.image,
722                                         Position<int> (
723                                                 lrint (_video_container_size.width * i.rectangle.x),
724                                                 lrint (_video_container_size.height * i.rectangle.y)
725                                                 )
726                                         )
727                                 );
728                 }
729
730                 /* String subtitles (rendered to an image) */
731                 if (!j.string.empty ()) {
732                         list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
733                         copy (s.begin(), s.end(), back_inserter (captions));
734                 }
735         }
736
737         if (captions.empty ()) {
738                 return optional<PositionImage> ();
739         }
740
741         return merge (captions);
742 }
743
/** Handle one frame of decoded video from a piece of content: discard it if it
 *  is outside the period we want, fill any gap since the last video we emitted
 *  (repeating the previous frame, or emitting black) and then emit this frame,
 *  repeated as required by the frame rate change.
 *  @param wp Piece that the video arrived from.
 *  @param video Decoded video frame.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	FrameRateChange frc (_film, piece->content);
	/* When the frame rate change says skip, drop every other content frame */
	if (frc.skip && (video.frame % 2) == 1) {
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* Last frame we emitted for this piece, if any, to repeat into the gap */
			LastVideoMap::const_iterator last = _last_video.find (wp);
			if (_film->three_d()) {
				/* In 3D we fill eye-by-eye, stopping just before the eye of the
				   frame we are about to emit.
				*/
				Eyes fill_to_eyes = video.eyes;
				if (fill_to_eyes == EYES_BOTH) {
					fill_to_eyes = EYES_LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = EYES_LEFT;
				}
				DCPTime j = fill_from;
				Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
				if (eyes == EYES_BOTH) {
					eyes = EYES_LEFT;
				}
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						/* Repeat the last frame with the eye we need */
						shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						/* Nothing to repeat; emit black */
						emit_video (black_player_video_frame(eyes), j);
					}
					if (eyes == EYES_RIGHT) {
						/* Time only advances after a complete left/right pair */
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: fill one whole frame at a time */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(EYES_BOTH), j);
					}
				}
			}
		}
	}

	/* Remember this frame so it can be repeated into any future gap from this piece */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (_film, video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content->video->range(),
			piece->content,
			video.frame
			)
		);

	/* Emit the frame, repeated as the frame rate change requires, but never past
	   the end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
844
/** Handle a block of decoded (and resampled) audio from a piece of content:
 *  trim it to the content's period, apply gain, remap to the DCP's channels,
 *  run it through any audio processor and push it into the audio merger.
 *  @param wp Piece that the audio arrived from.
 *  @param stream Stream within the content that the audio belongs to.
 *  @param content_audio Decoded audio data and its frame position.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* The block overlaps the end of the content; keep only the part before the end */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy before applying gain so we don't modify the decoder's buffers */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	/* Record the time up to which this stream has now been pushed */
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
913
914 void
915 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
916 {
917         shared_ptr<Piece> piece = wp.lock ();
918         shared_ptr<const TextContent> text = wc.lock ();
919         if (!piece || !text) {
920                 return;
921         }
922
923         /* Apply content's subtitle offsets */
924         subtitle.sub.rectangle.x += text->x_offset ();
925         subtitle.sub.rectangle.y += text->y_offset ();
926
927         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
928         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
929         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
930
931         /* Apply content's subtitle scale */
932         subtitle.sub.rectangle.width *= text->x_scale ();
933         subtitle.sub.rectangle.height *= text->y_scale ();
934
935         PlayerText ps;
936         shared_ptr<Image> image = subtitle.sub.image;
937         /* We will scale the subtitle up to fit _video_container_size */
938         dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
939         ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
940         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
941
942         _active_texts[text->type()].add_from (wc, ps, from);
943 }
944
/** Handle some plain (string, not bitmap) subtitles which have just started:
 *  apply the content's offset/scale settings to each string and store them
 *  as an active text.
 *  @param wp Piece that the subtitle arrived from.
 *  @param wc TextContent that the subtitle belongs to.
 *  @param subtitle The subtitle strings.
 */
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	if (from > piece->content->end(_film)) {
		/* Starts after the content has finished; ignore it */
		return;
	}

	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		/* Apply the content's subtitle offsets */
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   this multiplies size by max(xs, ys), e.g. if xs = ys = 0.5
		   the size is halved.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* Store the DCP start time on the subtitle itself */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[text->type()].add_from (wc, ps, from);
}
988
989 void
990 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
991 {
992         shared_ptr<const TextContent> text = wc.lock ();
993         if (!text) {
994                 return;
995         }
996
997         if (!_active_texts[text->type()].have(wc)) {
998                 return;
999         }
1000
1001         shared_ptr<Piece> piece = wp.lock ();
1002         if (!piece) {
1003                 return;
1004         }
1005
1006         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1007
1008         if (dcp_to > piece->content->end(_film)) {
1009                 return;
1010         }
1011
1012         pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1013
1014         bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1015         if (text->use() && !always && !text->burn()) {
1016                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1017         }
1018 }
1019
/** Seek the player, clearing out buffered state and telling each piece's
 *  decoder to seek.
 *  @param time DCP time to seek to.
 *  @param accurate true to set up emission state exactly at time; false to
 *  leave it unconstrained (the first things emitted may then come from before
 *  time).
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	/* Throw away everything we have buffered */
	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();
	}

	/* Seek each piece's decoder, or mark the piece done if the seek is past its end */
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content */
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* We know exactly where emissions will restart */
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	} else {
		/* We don't know what will come out next, so don't constrain it */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget old frames so they are not used for gap-filling after the seek */
	_last_video.clear ();
}
1075
1076 void
1077 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1078 {
1079         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1080            player before the video that requires them.
1081         */
1082         _delay.push_back (make_pair (pv, time));
1083
1084         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1085                 _last_video_time = time + one_video_frame();
1086         }
1087         _last_video_eyes = increment_eyes (pv->eyes());
1088
1089         if (_delay.size() < 3) {
1090                 return;
1091         }
1092
1093         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1094         _delay.pop_front();
1095         do_emit_video (to_do.first, to_do.second);
1096 }
1097
1098 void
1099 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1100 {
1101         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1102                 for (int i = 0; i < TEXT_COUNT; ++i) {
1103                         _active_texts[i].clear_before (time);
1104                 }
1105         }
1106
1107         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1108         if (subtitles) {
1109                 pv->set_text (subtitles.get ());
1110         }
1111
1112         Video (pv, time);
1113 }
1114
1115 void
1116 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1117 {
1118         /* Log if the assert below is about to fail */
1119         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1120                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1121         }
1122
1123         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1124         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1125         Audio (data, time, _film->audio_frame_rate());
1126         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1127 }
1128
1129 void
1130 Player::fill_audio (DCPTimePeriod period)
1131 {
1132         if (period.from == period.to) {
1133                 return;
1134         }
1135
1136         DCPOMATIC_ASSERT (period.from < period.to);
1137
1138         DCPTime t = period.from;
1139         while (t < period.to) {
1140                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1141                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1142                 if (samples) {
1143                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1144                         silence->make_silent ();
1145                         emit_audio (silence, t);
1146                 }
1147                 t += block;
1148         }
1149 }
1150
/** @return The duration of one frame of video at the film's video frame rate */
DCPTime
Player::one_video_frame () const
{
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}
1156
1157 pair<shared_ptr<AudioBuffers>, DCPTime>
1158 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1159 {
1160         DCPTime const discard_time = discard_to - time;
1161         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1162         Frame remaining_frames = audio->frames() - discard_frames;
1163         if (remaining_frames <= 0) {
1164                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1165         }
1166         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1167         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1168         return make_pair(cut, time + discard_time);
1169 }
1170
1171 void
1172 Player::set_dcp_decode_reduction (optional<int> reduction)
1173 {
1174         Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1175
1176         {
1177                 boost::mutex::scoped_lock lm (_mutex);
1178
1179                 if (reduction == _dcp_decode_reduction) {
1180                         lm.unlock ();
1181                         Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1182                         return;
1183                 }
1184
1185                 _dcp_decode_reduction = reduction;
1186                 setup_pieces_unlocked ();
1187         }
1188
1189         Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1190 }
1191
1192 optional<DCPTime>
1193 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1194 {
1195         boost::mutex::scoped_lock lm (_mutex);
1196
1197         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1198                 if (i->content == content) {
1199                         return content_time_to_dcp (i, t);
1200                 }
1201         }
1202
1203         /* We couldn't find this content; perhaps things are being changed over */
1204         return optional<DCPTime>();
1205 }