Put Film pointer into Decoder.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_text.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
50 #include "shuffler.h"
51 #include <dcp/reel.h>
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
57 #include <stdint.h>
58 #include <algorithm>
59 #include <iostream>
60
61 #include "i18n.h"
62
63 #define LOG_GENERAL(...) dcpomatic_log->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
64
65 using std::list;
66 using std::cout;
67 using std::min;
68 using std::max;
69 using std::min;
70 using std::vector;
71 using std::pair;
72 using std::map;
73 using std::make_pair;
74 using std::copy;
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
80
/* Property identifiers passed with the Change signal so observers can tell
   which aspect of the player's output has (or is about to be) changed.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
86
/** Construct a Player for a film/playlist pair, build decoders for the
 *  playlist's content and seek to the start.
 *  @param film Film that gives global parameters (frame rates, sizes etc.)
 *  @param playlist Content to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _suspended (false)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor (if any) as if it had just been set */
	film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);

	/* Build Pieces (content + decoder pairs) and position everything at time zero */
	setup_pieces ();
	seek (DCPTime (), true);
}
113
Player::~Player ()
{
	/* _shuffler is a raw owning pointer (re-created in setup_pieces_unlocked) */
	delete _shuffler;
}
118
/** Thread-safe wrapper: take _mutex then rebuild our Pieces */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
125
126 bool
127 have_video (shared_ptr<Piece> piece)
128 {
129         return piece->decoder && piece->decoder->video;
130 }
131
132 bool
133 have_audio (shared_ptr<Piece> piece)
134 {
135         return piece->decoder && piece->decoder->audio;
136 }
137
/** Rebuild the list of Pieces (content + decoder + frame-rate-change) from the
 *  playlist and re-wire all decoder signals to our handlers.  Also resets the
 *  black/silent filler state and our emission positions.  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_pieces.clear ();

	/* Re-create the shuffler, which re-orders 3D L/R video that arrives out of sequence */
	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

		if (!i->paths_valid ()) {
			/* Content files are missing; we can't decode this */
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast);
		FrameRateChange frc (i->active_video_frame_rate(_film), _film->video_frame_rate());

		if (!decoder) {
			/* Not something that we can decode; e.g. Atmos content */
			continue;
		}

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
				i->set_ignore (true);
			}
		}

		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			/* DCP content may refer to assets in another DCP rather than decoding them */
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Forward text events, tagging each with its piece and text content */
		list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}
	}

	/* Record, for each audio stream, the piece it belongs to and where its audio starts */
	_stream_states.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Work out the periods for which we must generate black video / silent audio */
	_black = Empty (_film, _pieces, bind(&have_video, _1));
	_silent = Empty (_film, _pieces, bind(&have_audio, _1));

	/* Reset emission state to the start */
	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();
	_suspended = false;
}
238
239 void
240 Player::playlist_content_change (ChangeType type, int property, bool frequent)
241 {
242         if (type == CHANGE_TYPE_PENDING) {
243                 boost::mutex::scoped_lock lm (_mutex);
244                 /* The player content is probably about to change, so we can't carry on
245                    until that has happened and we've rebuilt our pieces.  Stop pass()
246                    and seek() from working until then.
247                 */
248                 _suspended = true;
249         } else if (type == CHANGE_TYPE_DONE) {
250                 /* A change in our content has gone through.  Re-build our pieces. */
251                 setup_pieces ();
252         } else if (type == CHANGE_TYPE_CANCELLED) {
253                 boost::mutex::scoped_lock lm (_mutex);
254                 _suspended = false;
255         }
256
257         Change (type, property, frequent);
258 }
259
/** Set the size of the "container" into which video will be scaled, emitting
 *  PENDING/DONE (or CANCELLED, if the size is unchanged) Change signals.
 *  @param s New container size in pixels.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No change; unlock before emitting so handlers can call back into us */
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Keep a ready-made black frame of the new size for gap filling */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
282
/** Handler for a change to the playlist itself (content added/removed etc.) */
void
Player::playlist_change (ChangeType type)
{
	if (type == CHANGE_TYPE_DONE) {
		/* The playlist's content list has settled; rebuild our pieces */
		setup_pieces ();
	}
	Change (type, PlayerProperty::PLAYLIST, false);
}
291
292 void
293 Player::film_change (ChangeType type, Film::Property p)
294 {
295         /* Here we should notice Film properties that affect our output, and
296            alert listeners that our output now would be different to how it was
297            last time we were run.
298         */
299
300         if (p == Film::CONTAINER) {
301                 Change (type, PlayerProperty::FILM_CONTAINER, false);
302         } else if (p == Film::VIDEO_FRAME_RATE) {
303                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
304                    so we need new pieces here.
305                 */
306                 if (type == CHANGE_TYPE_DONE) {
307                         setup_pieces ();
308                 }
309                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
310         } else if (p == Film::AUDIO_PROCESSOR) {
311                 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
312                         boost::mutex::scoped_lock lm (_mutex);
313                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
314                 }
315         } else if (p == Film::AUDIO_CHANNELS) {
316                 if (type == CHANGE_TYPE_DONE) {
317                         boost::mutex::scoped_lock lm (_mutex);
318                         _audio_merger.clear ();
319                 }
320         }
321 }
322
/** @return A black frame, sized to the current video container, for the given eye(s) */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return shared_ptr<PlayerVideo> (
		new PlayerVideo (
			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
			Crop (),
			optional<double> (),
			_video_container_size,
			_video_container_size,
			eyes,
			PART_WHOLE,
			PresetColourConversion::all().front().conversion,
			boost::weak_ptr<Content>(),
			boost::optional<Frame>()
		)
	);
}
341
/** Convert a DCP time to a frame index at the content's own video rate.
 *  @param piece Piece the time falls within.
 *  @param t Time on the DCP timeline.
 *  @return Frame index into the content's video.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	/* Clamp to the content's trimmed length, then shift by the start trim (not below zero) */
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
358
359 DCPTime
360 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
361 {
362         /* See comment in dcp_to_content_video */
363         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
364         return d + piece->content->position();
365 }
366
/** Convert a DCP time to a frame index in the content's audio after it has
 *  been resampled to the film's audio rate.
 */
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	/* Clamp to the content's trimmed length */
	s = min (piece->content->length_after_trim(_film), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
}
375
376 DCPTime
377 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
378 {
379         /* See comment in dcp_to_content_video */
380         return DCPTime::from_frames (f, _film->audio_frame_rate())
381                 - DCPTime (piece->content->trim_start(), piece->frc)
382                 + piece->content->position();
383 }
384
/** Convert a DCP time to a time within a piece's content (including its trim) */
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	/* Clamp to the content's trimmed length, then add back the start trim (not below zero) */
	s = min (piece->content->length_after_trim(_film), s);
	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
392
393 DCPTime
394 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
395 {
396         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
397 }
398
399 list<shared_ptr<Font> >
400 Player::get_subtitle_fonts ()
401 {
402         boost::mutex::scoped_lock lm (_mutex);
403
404         list<shared_ptr<Font> > fonts;
405         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
406                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
407                         /* XXX: things may go wrong if there are duplicate font IDs
408                            with different font files.
409                         */
410                         list<shared_ptr<Font> > f = j->fonts ();
411                         copy (f.begin(), f.end(), back_inserter (fonts));
412                 }
413         }
414
415         return fonts;
416 }
417
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_video = true;
	/* Re-build pieces so decoders are told to ignore video */
	setup_pieces_unlocked ();
}
426
/** Set this player never to produce any audio data */
void
Player::set_ignore_audio ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_audio = true;
	/* Re-build pieces so decoders are told to ignore audio */
	setup_pieces_unlocked ();
}
434
/** Set this player never to produce any text (subtitle/caption) data */
void
Player::set_ignore_text ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_text = true;
	/* Re-build pieces so text decoders are told to ignore their data */
	setup_pieces_unlocked ();
}
442
/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
	boost::mutex::scoped_lock lm (_mutex);
	/* No piece rebuild needed; this only affects open_subtitles_for_frame() */
	_always_burn_open_subtitles = true;
}
450
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_fast = true;
	/* Re-build pieces as the flag is passed to decoder_factory */
	setup_pieces_unlocked ();
}
459
/** Set the player to decode assets that DCP content references from other DCPs,
 *  rather than skipping them.
 */
void
Player::set_play_referenced ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	/* Re-build pieces so DCP decoders pick up the new setting */
	setup_pieces_unlocked ();
}
467
/** @return Details of all the reel assets which our DCP content says should be
 *  referenced (rather than re-encoded), with their entry points and durations
 *  adjusted for the content's trim, and placed on the DCP timeline.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			/* Only DCP content can reference reel assets */
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (_film, j, false));
		} catch (...) {
			/* NOTE(review): a failure here abandons the whole scan and returns
			   whatever was collected so far — confirm this is intended rather
			   than `continue`-ing to the next piece of content.
			*/
			return a;
		}

		/* Offset, in frames at the film's rate, of the current reel from the start of this content */
		int64_t offset = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			DCPOMATIC_ASSERT (j->video_frame_rate ());
			double const cfr = j->video_frame_rate().get();
			Frame const trim_start = j->trim_start().frames_round (cfr);
			Frame const trim_end = j->trim_end().frames_round (cfr);
			int const ffr = _film->video_frame_rate ();

			/* Where this reel lands on the DCP timeline */
			DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
			if (j->reference_video ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
				DCPOMATIC_ASSERT (ra);
				/* Apply the content's trim to the asset's entry point and duration */
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_audio ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
				shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_text (TEXT_CLOSED_CAPTION)) {
				/* A reel may have several closed-caption assets */
				BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
					DCPOMATIC_ASSERT (l);
					l->set_entry_point (l->entry_point() + trim_start);
					l->set_duration (l->duration() - trim_start - trim_end);
					a.push_back (
						ReferencedReelAsset (l, DCPTimePeriod (from, from + DCPTime::from_frames (l->duration(), ffr)))
						);
				}
			}

			/* Assume that main picture duration is the length of the reel */
			offset += k->main_picture()->duration ();
		}
	}

	return a;
}
546
/** Make the player emit some more data (video, audio, black or silence),
 *  advancing whichever source is farthest behind.
 *  @return true if there is no more data to emit (or we can't emit right now).
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		return false;
	}

	if (_playlist->length(_film) == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		/* Position of this piece's decoder on the DCP timeline */
		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What kind of thing we will emit on this pass */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Black or silent filler takes precedence if it is earlier than any content */
	if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
		earliest_content->done = earliest_content->decoder->pass ();
		break;
	case BLACK:
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   this silence.  Bodge the start time of the silence to fix it.  I'm
			   not sure if this is the right solution --- maybe the last thing should
			   be padded `forward' rather than this thing padding `back'.
			*/
			period.from = min(period.from, *_last_audio_time);
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _film->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Everything has been emitted: flush any video held back by the shuffler and the delay queue */
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
683
/** @return Open subtitles for the frame at the given time, converted to images */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	/* Collect every open subtitle that should be burnt in during this one-frame period */
	BOOST_FOREACH (
		PlayerText j,
		_active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		BOOST_FOREACH (BitmapText i, j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

			/* Convert the subtitle's fractional position to pixels within the container */
			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint (_video_container_size.width * i.rectangle.x),
						lrint (_video_container_size.height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty ()) {
			list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty ()) {
		/* Nothing to burn in for this frame */
		return optional<PositionImage> ();
	}

	/* Combine all the collected images into one */
	return merge (captions);
}
729
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	/* Handle a video frame emitted by the decoder belonging to piece `wp'.
	   Discards out-of-range frames, fills any gap since the last video we
	   emitted (repeating the previous frame, or using black), then emits
	   this frame, repeated if required by the frame-rate conversion.
	*/
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	FrameRateChange frc(piece->content->active_video_frame_rate(_film), _film->video_frame_rate());
	if (frc.skip && (video.frame % 2) == 1) {
		/* The rate conversion drops every other content frame */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());
		/* The most recent frame we emitted for this piece (if any); used to fill gaps */
		LastVideoMap::const_iterator last = _last_video.find (wp);
		if (_film->three_d()) {
			/* In 3D we fill eye-by-eye so that the left/right alternation
			   stays correct up to the frame we are about to emit.
			*/
			Eyes fill_to_eyes = video.eyes;
			if (fill_to_eyes == EYES_BOTH) {
				fill_to_eyes = EYES_LEFT;
			}
			if (fill_to == piece->content->end(_film)) {
				/* Don't fill after the end of the content */
				fill_to_eyes = EYES_LEFT;
			}
			DCPTime j = fill_from;
			Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
			if (eyes == EYES_BOTH) {
				eyes = EYES_LEFT;
			}
			while (j < fill_to || eyes != fill_to_eyes) {
				if (last != _last_video.end()) {
					/* Repeat the last-seen frame, re-badged with the eye we need */
					shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
					copy->set_eyes (eyes);
					emit_video (copy, j);
				} else {
					/* Nothing to repeat: emit black */
					emit_video (black_player_video_frame(eyes), j);
				}
				if (eyes == EYES_RIGHT) {
					/* Time only advances once both eyes of a frame have gone out */
					j += one_video_frame();
				}
				eyes = increment_eyes (eyes);
			}
		} else {
			/* 2D: fill frame-by-frame with the last frame, or black */
			for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
				if (last != _last_video.end()) {
					emit_video (last->second, j);
				} else {
					emit_video (black_player_video_frame(EYES_BOTH), j);
				}
			}
		}
	}

	/* Remember this frame so it can be repeated to fill any future gap */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (_film, video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content,
			video.frame
			)
		);

	/* Emit the frame frc.repeat times (for rate conversion), but never
	   beyond the end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
825
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	/* Handle a block of audio from `stream' of piece `wp': trim it to the
	   content's period, apply gain, remap it to the DCP channel layout,
	   run any audio processor and push the result into the merger.
	*/
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate(_film));

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* The block straddles the end of the content: keep only the part inside */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(_film->audio_frame_rate());
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain: applied to a copy so the decoder's buffers are not modified */

	if (content->gain() != 0) {
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap the stream's channels onto the DCP's channels */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	/* We should already know about this stream */
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
892
893 void
894 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
895 {
896         shared_ptr<Piece> piece = wp.lock ();
897         shared_ptr<const TextContent> text = wc.lock ();
898         if (!piece || !text) {
899                 return;
900         }
901
902         /* Apply content's subtitle offsets */
903         subtitle.sub.rectangle.x += text->x_offset ();
904         subtitle.sub.rectangle.y += text->y_offset ();
905
906         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
907         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
908         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
909
910         /* Apply content's subtitle scale */
911         subtitle.sub.rectangle.width *= text->x_scale ();
912         subtitle.sub.rectangle.height *= text->y_scale ();
913
914         PlayerText ps;
915         shared_ptr<Image> image = subtitle.sub.image;
916         /* We will scale the subtitle up to fit _video_container_size */
917         dcp::Size scaled_size (subtitle.sub.rectangle.width * _video_container_size.width, subtitle.sub.rectangle.height * _video_container_size.height);
918         ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
919         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
920
921         _active_texts[text->type()].add_from (wc, ps, from);
922 }
923
924 void
925 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
926 {
927         shared_ptr<Piece> piece = wp.lock ();
928         shared_ptr<const TextContent> text = wc.lock ();
929         if (!piece || !text) {
930                 return;
931         }
932
933         PlayerText ps;
934         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
935
936         if (from > piece->content->end(_film)) {
937                 return;
938         }
939
940         BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
941                 s.set_h_position (s.h_position() + text->x_offset ());
942                 s.set_v_position (s.v_position() + text->y_offset ());
943                 float const xs = text->x_scale();
944                 float const ys = text->y_scale();
945                 float size = s.size();
946
947                 /* Adjust size to express the common part of the scaling;
948                    e.g. if xs = ys = 0.5 we scale size by 2.
949                 */
950                 if (xs > 1e-5 && ys > 1e-5) {
951                         size *= 1 / min (1 / xs, 1 / ys);
952                 }
953                 s.set_size (size);
954
955                 /* Then express aspect ratio changes */
956                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
957                         s.set_aspect_adjust (xs / ys);
958                 }
959
960                 s.set_in (dcp::Time(from.seconds(), 1000));
961                 ps.string.push_back (StringText (s, text->outline_width()));
962                 ps.add_fonts (text->fonts ());
963         }
964
965         _active_texts[text->type()].add_from (wc, ps, from);
966 }
967
968 void
969 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
970 {
971         shared_ptr<const TextContent> text = wc.lock ();
972         if (!text) {
973                 return;
974         }
975
976         if (!_active_texts[text->type()].have(wc)) {
977                 return;
978         }
979
980         shared_ptr<Piece> piece = wp.lock ();
981         if (!piece) {
982                 return;
983         }
984
985         DCPTime const dcp_to = content_time_to_dcp (piece, to);
986
987         if (dcp_to > piece->content->end(_film)) {
988                 return;
989         }
990
991         pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
992
993         bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
994         if (text->use() && !always && !text->burn()) {
995                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
996         }
997 }
998
void
Player::seek (DCPTime time, bool accurate)
{
	/* Seek to `time'.  If `accurate' is true the decoders are asked for
	   data starting exactly at `time'; otherwise data may start somewhat
	   earlier.
	*/
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	if (_shuffler) {
		_shuffler->clear ();
	}

	/* Throw away any video frames waiting in the delay line */
	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	/* Discard pending audio and any active subtitles */
	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();
	}

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content */
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* We know exactly what the next emitted data's time will be */
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	} else {
		/* We don't know when the next data will arrive, so clear our markers */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Stored frames are no longer valid for gap-filling */
	_last_video.clear ();
}
1054
1055 void
1056 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1057 {
1058         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1059            player before the video that requires them.
1060         */
1061         _delay.push_back (make_pair (pv, time));
1062
1063         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1064                 _last_video_time = time + one_video_frame();
1065         }
1066         _last_video_eyes = increment_eyes (pv->eyes());
1067
1068         if (_delay.size() < 3) {
1069                 return;
1070         }
1071
1072         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1073         _delay.pop_front();
1074         do_emit_video (to_do.first, to_do.second);
1075 }
1076
1077 void
1078 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1079 {
1080         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1081                 for (int i = 0; i < TEXT_COUNT; ++i) {
1082                         _active_texts[i].clear_before (time);
1083                 }
1084         }
1085
1086         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1087         if (subtitles) {
1088                 pv->set_text (subtitles.get ());
1089         }
1090
1091         Video (pv, time);
1092 }
1093
1094 void
1095 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1096 {
1097         /* Log if the assert below is about to fail */
1098         if (_last_audio_time && time != *_last_audio_time) {
1099                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1100         }
1101
1102         /* This audio must follow on from the previous */
1103         DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1104         Audio (data, time);
1105         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1106 }
1107
1108 void
1109 Player::fill_audio (DCPTimePeriod period)
1110 {
1111         if (period.from == period.to) {
1112                 return;
1113         }
1114
1115         DCPOMATIC_ASSERT (period.from < period.to);
1116
1117         DCPTime t = period.from;
1118         while (t < period.to) {
1119                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1120                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1121                 if (samples) {
1122                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1123                         silence->make_silent ();
1124                         emit_audio (silence, t);
1125                 }
1126                 t += block;
1127         }
1128 }
1129
DCPTime
Player::one_video_frame () const
{
	/* The length of one video frame at the DCP's frame rate */
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}
1135
1136 pair<shared_ptr<AudioBuffers>, DCPTime>
1137 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1138 {
1139         DCPTime const discard_time = discard_to - time;
1140         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1141         Frame remaining_frames = audio->frames() - discard_frames;
1142         if (remaining_frames <= 0) {
1143                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1144         }
1145         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1146         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1147         return make_pair(cut, time + discard_time);
1148 }
1149
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	/* Change the decode-resolution reduction applied to DCP content,
	   rebuilding the pieces if it actually changes.  Emits the usual
	   PENDING then DONE (or CANCELLED) Change signals around the update.
	*/
	Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change; release the lock before signalling, presumably so
			   observers can safely call back into the Player.
			*/
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		setup_pieces_unlocked ();
	}

	Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1170
1171 optional<DCPTime>
1172 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1173 {
1174         boost::mutex::scoped_lock lm (_mutex);
1175
1176         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1177                 if (i->content == content) {
1178                         return content_time_to_dcp (i, t);
1179                 }
1180         }
1181
1182         /* We couldn't find this content; perhaps things are being changed over */
1183         return optional<DCPTime>();
1184 }