0c3aea0288ef057f531ef315333a01277c945655
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_text.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
50 #include "shuffler.h"
51 #include <dcp/reel.h>
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
57 #include <stdint.h>
58 #include <algorithm>
59 #include <iostream>
60
61 #include "i18n.h"
62
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
64
65 using std::list;
66 using std::cout;
67 using std::min;
68 using std::max;
69 using std::min;
70 using std::vector;
71 using std::pair;
72 using std::map;
73 using std::make_pair;
74 using std::copy;
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
80
/* Identifiers for the Player's own properties, passed to observers with the
   Change signal (alongside Film/Content property identifiers).
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
86
/** Construct a Player.
 *  @param film Film that is being played.
 *  @param playlist Playlist to take content from.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _suspended (false)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	/* Watch for changes to the film, the playlist and the playlist's content */
	_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
	_playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1));
	_playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Set up _audio_processor if the film is using one */
	film_changed (Film::AUDIO_PROCESSOR);

	setup_pieces ();
	/* Start from the beginning (second argument presumably requests an accurate seek --- confirm) */
	seek (DCPTime (), true);
}
110
Player::~Player ()
{
	/* _shuffler is an owning raw pointer (re-made in setup_pieces_unlocked) */
	delete _shuffler;
}
115
/** Thread-safe wrapper which takes _mutex and rebuilds our pieces */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
122
123 void
124 Player::setup_pieces_unlocked ()
125 {
126         _pieces.clear ();
127
128         delete _shuffler;
129         _shuffler = new Shuffler();
130         _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
131
132         BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
133
134                 if (!i->paths_valid ()) {
135                         continue;
136                 }
137
138                 if (_ignore_video && _ignore_audio && i->text.empty()) {
139                         /* We're only interested in text and this content has none */
140                         continue;
141                 }
142
143                 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
144                 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
145
146                 if (!decoder) {
147                         /* Not something that we can decode; e.g. Atmos content */
148                         continue;
149                 }
150
151                 if (decoder->video && _ignore_video) {
152                         decoder->video->set_ignore (true);
153                 }
154
155                 if (decoder->audio && _ignore_audio) {
156                         decoder->audio->set_ignore (true);
157                 }
158
159                 if (_ignore_text) {
160                         BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
161                                 i->set_ignore (true);
162                         }
163                 }
164
165                 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
166                 if (dcp) {
167                         dcp->set_decode_referenced (_play_referenced);
168                         if (_play_referenced) {
169                                 dcp->set_forced_reduction (_dcp_decode_reduction);
170                         }
171                 }
172
173                 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
174                 _pieces.push_back (piece);
175
176                 if (decoder->video) {
177                         if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
178                                 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
179                                 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
180                         } else {
181                                 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
182                         }
183                 }
184
185                 if (decoder->audio) {
186                         decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
187                 }
188
189                 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
190
191                 while (j != decoder->text.end()) {
192                         (*j)->BitmapStart.connect (
193                                 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
194                                 );
195                         (*j)->PlainStart.connect (
196                                 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
197                                 );
198                         (*j)->Stop.connect (
199                                 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1, _2)
200                                 );
201
202                         ++j;
203                 }
204         }
205
206         _stream_states.clear ();
207         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
208                 if (i->content->audio) {
209                         BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
210                                 _stream_states[j] = StreamState (i, i->content->position ());
211                         }
212                 }
213         }
214
215         _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
216         _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
217
218         _last_video_time = DCPTime ();
219         _last_video_eyes = EYES_BOTH;
220         _last_audio_time = DCPTime ();
221         _suspended = false;
222 }
223
/** Handle a change to some content in the playlist.
 *  @param type CHANGE_TYPE_PENDING if the change is about to happen, CHANGE_TYPE_DONE if it has happened.
 *  @param property Identifier of the property that is changing.
 *  @param frequent true if this is one of a rapid series of changes.
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (type == CHANGE_TYPE_PENDING) {
		boost::mutex::scoped_lock lm (_mutex);
		/* The player content is probably about to change, so we can't carry on
		   until that has happened and we've rebuilt our pieces.  Stop pass()
		   and seek() from working until then.
		*/
		_suspended = true;
	} else if (type == CHANGE_TYPE_DONE) {
		/* A change in our content has gone through.  Re-build our pieces. */
		setup_pieces ();
	}

	/* Pass the change on to our own observers */
	Change (type, property, frequent);
}
241
/** Set the size of the container that video will be scaled into.
 *  Emits Change(DONE, VIDEO_CONTAINER_SIZE) only if the size actually changed.
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No change; no signal */
			return;
		}

		_video_container_size = s;

		/* Re-make the black filler image at the new size */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	/* Emit after the lock has been released (note the scope above) */
	Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
260
/** Handle a change to the playlist as a whole.
 *  @param type PENDING or DONE; pieces are only rebuilt once the change is DONE.
 */
void
Player::playlist_change (ChangeType type)
{
	if (type == CHANGE_TYPE_DONE) {
		setup_pieces ();
	}
	Change (type, PlayerProperty::PLAYLIST, false);
}
269
/** Handle a change to a property of the Film.
 *  @param p The property which changed.
 */
void
Player::film_changed (Film::Property p)
{
	/* Here we should notice Film properties that affect our output, and
	   alert listeners that our output now would be different to how it was
	   last time we were run.
	*/

	if (p == Film::CONTAINER) {
		Change (CHANGE_TYPE_PENDING, PlayerProperty::FILM_CONTAINER, false);
	} else if (p == Film::VIDEO_FRAME_RATE) {
		/* Pieces contain a FrameRateChange which contains the DCP frame rate,
		   so we need new pieces here.
		*/
		/* XXX: missing PENDING! */
		setup_pieces ();
		Change (CHANGE_TYPE_DONE, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
	} else if (p == Film::AUDIO_PROCESSOR) {
		if (_film->audio_processor ()) {
			boost::mutex::scoped_lock lm (_mutex);
			/* Make our own copy of the processor, set up for the film's audio rate */
			_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
		}
	} else if (p == Film::AUDIO_CHANNELS) {
		boost::mutex::scoped_lock lm (_mutex);
		/* Channel count has changed so any audio already merged is stale */
		_audio_merger.clear ();
	}
}
297
298 list<PositionImage>
299 Player::transform_bitmap_texts (list<BitmapText> subs) const
300 {
301         list<PositionImage> all;
302
303         for (list<BitmapText>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
304                 if (!i->image) {
305                         continue;
306                 }
307
308                 /* We will scale the subtitle up to fit _video_container_size */
309                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
310
311                 all.push_back (
312                         PositionImage (
313                                 i->image->scale (
314                                         scaled_size,
315                                         dcp::YUV_TO_RGB_REC601,
316                                         i->image->pixel_format (),
317                                         true,
318                                         _fast
319                                         ),
320                                 Position<int> (
321                                         lrint (_video_container_size.width * i->rectangle.x),
322                                         lrint (_video_container_size.height * i->rectangle.y)
323                                         )
324                                 )
325                         );
326         }
327
328         return all;
329 }
330
331 shared_ptr<PlayerVideo>
332 Player::black_player_video_frame (Eyes eyes) const
333 {
334         return shared_ptr<PlayerVideo> (
335                 new PlayerVideo (
336                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
337                         Crop (),
338                         optional<double> (),
339                         _video_container_size,
340                         _video_container_size,
341                         eyes,
342                         PART_WHOLE,
343                         PresetColourConversion::all().front().conversion,
344                         boost::weak_ptr<Content>(),
345                         boost::optional<Frame>()
346                 )
347         );
348 }
349
/** @param piece Piece containing some video content.
 *  @param t Time in the DCP.
 *  @return Index of the content video frame active at t, clamped to the
 *  content's trimmed period and accounting for frame skip/repeat.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
366
367 DCPTime
368 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
369 {
370         /* See comment in dcp_to_content_video */
371         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
372         return d + piece->content->position();
373 }
374
/** @param piece Piece containing some audio content.
 *  @param t Time in the DCP.
 *  @return Corresponding frame index in the content's audio, counted at the
 *  film's audio rate (i.e. after resampling); never negative.
 */
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
}
383
384 DCPTime
385 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
386 {
387         /* See comment in dcp_to_content_video */
388         return DCPTime::from_frames (f, _film->audio_frame_rate())
389                 - DCPTime (piece->content->trim_start(), piece->frc)
390                 + piece->content->position();
391 }
392
/** @param piece Piece containing some content.
 *  @param t Time in the DCP.
 *  @return Corresponding time within the content (including the trimmed-off
 *  part at the start); never negative.
 */
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
400
401 DCPTime
402 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
403 {
404         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
405 }
406
407 list<shared_ptr<Font> >
408 Player::get_subtitle_fonts ()
409 {
410         boost::mutex::scoped_lock lm (_mutex);
411
412         list<shared_ptr<Font> > fonts;
413         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
414                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
415                         /* XXX: things may go wrong if there are duplicate font IDs
416                            with different font files.
417                         */
418                         list<shared_ptr<Font> > f = j->fonts ();
419                         copy (f.begin(), f.end(), back_inserter (fonts));
420                 }
421         }
422
423         return fonts;
424 }
425
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_video = true;
	/* Rebuild pieces so that the decoders pick up the new ignore state */
	setup_pieces_unlocked ();
}
434
/** Set this player never to produce any audio data */
void
Player::set_ignore_audio ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_audio = true;
	setup_pieces_unlocked ();
}
442
/** Set this player never to produce any text (subtitle/caption) data */
void
Player::set_ignore_text ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_text = true;
	setup_pieces_unlocked ();
}
450
/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_always_burn_open_subtitles = true;
	/* Note: no piece rebuild here, unlike the set_ignore_* methods */
}
458
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_fast = true;
	/* _fast is passed to decoder_factory, so the decoders must be re-made */
	setup_pieces_unlocked ();
}
467
/** Set the player to decode content from DCPs even when it is marked for
 *  reference (see the DCPDecoder setup in setup_pieces_unlocked).
 */
void
Player::set_play_referenced ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	setup_pieces_unlocked ();
}
475
476 list<ReferencedReelAsset>
477 Player::get_reel_assets ()
478 {
479         /* Does not require a lock on _mutex as it's only called from DCPEncoder */
480
481         list<ReferencedReelAsset> a;
482
483         BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
484                 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
485                 if (!j) {
486                         continue;
487                 }
488
489                 scoped_ptr<DCPDecoder> decoder;
490                 try {
491                         decoder.reset (new DCPDecoder (j, _film->log(), false));
492                 } catch (...) {
493                         return a;
494                 }
495
496                 int64_t offset = 0;
497                 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
498
499                         DCPOMATIC_ASSERT (j->video_frame_rate ());
500                         double const cfr = j->video_frame_rate().get();
501                         Frame const trim_start = j->trim_start().frames_round (cfr);
502                         Frame const trim_end = j->trim_end().frames_round (cfr);
503                         int const ffr = _film->video_frame_rate ();
504
505                         DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
506                         if (j->reference_video ()) {
507                                 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
508                                 DCPOMATIC_ASSERT (ra);
509                                 ra->set_entry_point (ra->entry_point() + trim_start);
510                                 ra->set_duration (ra->duration() - trim_start - trim_end);
511                                 a.push_back (
512                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
513                                         );
514                         }
515
516                         if (j->reference_audio ()) {
517                                 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
518                                 DCPOMATIC_ASSERT (ra);
519                                 ra->set_entry_point (ra->entry_point() + trim_start);
520                                 ra->set_duration (ra->duration() - trim_start - trim_end);
521                                 a.push_back (
522                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
523                                         );
524                         }
525
526                         if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
527                                 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
528                                 DCPOMATIC_ASSERT (ra);
529                                 ra->set_entry_point (ra->entry_point() + trim_start);
530                                 ra->set_duration (ra->duration() - trim_start - trim_end);
531                                 a.push_back (
532                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
533                                         );
534                         }
535
536                         if (j->reference_text (TEXT_CLOSED_CAPTION)) {
537                                 shared_ptr<dcp::ReelAsset> ra = k->closed_caption ();
538                                 DCPOMATIC_ASSERT (ra);
539                                 ra->set_entry_point (ra->entry_point() + trim_start);
540                                 ra->set_duration (ra->duration() - trim_start - trim_end);
541                                 a.push_back (
542                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
543                                         );
544                         }
545
546                         /* Assume that main picture duration is the length of the reel */
547                         offset += k->main_picture()->duration ();
548                 }
549         }
550
551         return a;
552 }
553
/** Ask the piece (or black/silence filler) which is furthest behind to emit
 *  some more data, then push out any audio which is now complete.
 *  @return true if there is nothing left to do (playback has finished).
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		return false;
	}

	if (_playlist->length() == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		/* Current position of this piece in DCP time (never before its trim point) */
		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end()) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What we will do this pass: decode content, emit black, emit silence, or nothing */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Black/silence fillers win if they are further behind than any content */
	if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
		earliest_content->done = earliest_content->decoder->pass ();
		break;
	case BLACK:
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   this silence.  Bodge the start time of the silence to fix it.  I'm
			   not sure if this is the right solution --- maybe the last thing should
			   be padded `forward' rather than this thing padding `back'.
			*/
			period.from = min(period.from, *_last_audio_time);
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _film->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Everything is decoded: flush out-of-order 3D video and any delayed frames */
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
690
691 /** @return Open subtitles for the frame at the given time, converted to images */
692 optional<PositionImage>
693 Player::open_subtitles_for_frame (DCPTime time) const
694 {
695         list<PositionImage> captions;
696         int const vfr = _film->video_frame_rate();
697
698         BOOST_FOREACH (
699                 PlayerText j,
700                 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
701                 ) {
702
703                 /* Bitmap subtitles */
704                 list<PositionImage> c = transform_bitmap_texts (j.bitmap);
705                 copy (c.begin(), c.end(), back_inserter (captions));
706
707                 /* String subtitles (rendered to an image) */
708                 if (!j.string.empty ()) {
709                         list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
710                         copy (s.begin(), s.end(), back_inserter (captions));
711                 }
712         }
713
714         if (captions.empty ()) {
715                 return optional<PositionImage> ();
716         }
717
718         return merge (captions);
719 }
720
721 void
722 Player::video (weak_ptr<Piece> wp, ContentVideo video)
723 {
724         shared_ptr<Piece> piece = wp.lock ();
725         if (!piece) {
726                 return;
727         }
728
729         FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
730         if (frc.skip && (video.frame % 2) == 1) {
731                 return;
732         }
733
734         /* Time of the first frame we will emit */
735         DCPTime const time = content_video_to_dcp (piece, video.frame);
736
737         /* Discard if it's before the content's period or the last accurate seek.  We can't discard
738            if it's after the content's period here as in that case we still need to fill any gap between
739            `now' and the end of the content's period.
740         */
741         if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
742                 return;
743         }
744
745         /* Fill gaps that we discover now that we have some video which needs to be emitted.
746            This is where we need to fill to.
747         */
748         DCPTime fill_to = min (time, piece->content->end());
749
750         if (_last_video_time) {
751                 DCPTime fill_from = max (*_last_video_time, piece->content->position());
752                 LastVideoMap::const_iterator last = _last_video.find (wp);
753                 if (_film->three_d()) {
754                         Eyes fill_to_eyes = video.eyes;
755                         if (fill_to == piece->content->end()) {
756                                 /* Don't fill after the end of the content */
757                                 fill_to_eyes = EYES_LEFT;
758                         }
759                         DCPTime j = fill_from;
760                         Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
761                         if (eyes == EYES_BOTH) {
762                                 eyes = EYES_LEFT;
763                         }
764                         while (j < fill_to || eyes != fill_to_eyes) {
765                                 if (last != _last_video.end()) {
766                                         shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
767                                         copy->set_eyes (eyes);
768                                         emit_video (copy, j);
769                                 } else {
770                                         emit_video (black_player_video_frame(eyes), j);
771                                 }
772                                 if (eyes == EYES_RIGHT) {
773                                         j += one_video_frame();
774                                 }
775                                 eyes = increment_eyes (eyes);
776                         }
777                 } else {
778                         for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
779                                 if (last != _last_video.end()) {
780                                         emit_video (last->second, j);
781                                 } else {
782                                         emit_video (black_player_video_frame(EYES_BOTH), j);
783                                 }
784                         }
785                 }
786         }
787
788         _last_video[wp].reset (
789                 new PlayerVideo (
790                         video.image,
791                         piece->content->video->crop (),
792                         piece->content->video->fade (video.frame),
793                         piece->content->video->scale().size (
794                                 piece->content->video, _video_container_size, _film->frame_size ()
795                                 ),
796                         _video_container_size,
797                         video.eyes,
798                         video.part,
799                         piece->content->video->colour_conversion(),
800                         piece->content,
801                         video.frame
802                         )
803                 );
804
805         DCPTime t = time;
806         for (int i = 0; i < frc.repeat; ++i) {
807                 if (t < piece->content->end()) {
808                         emit_video (_last_video[wp], t);
809                 }
810                 t += one_video_frame ();
811         }
812 }
813
814 void
815 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
816 {
817         DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
818
819         shared_ptr<Piece> piece = wp.lock ();
820         if (!piece) {
821                 return;
822         }
823
824         shared_ptr<AudioContent> content = piece->content->audio;
825         DCPOMATIC_ASSERT (content);
826
827         /* Compute time in the DCP */
828         DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
829         /* And the end of this block in the DCP */
830         DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
831
832         /* Remove anything that comes before the start or after the end of the content */
833         if (time < piece->content->position()) {
834                 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
835                 if (!cut.first) {
836                         /* This audio is entirely discarded */
837                         return;
838                 }
839                 content_audio.audio = cut.first;
840                 time = cut.second;
841         } else if (time > piece->content->end()) {
842                 /* Discard it all */
843                 return;
844         } else if (end > piece->content->end()) {
845                 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
846                 if (remaining_frames == 0) {
847                         return;
848                 }
849                 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
850                 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
851                 content_audio.audio = cut;
852         }
853
854         DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
855
856         /* Gain */
857
858         if (content->gain() != 0) {
859                 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
860                 gain->apply_gain (content->gain ());
861                 content_audio.audio = gain;
862         }
863
864         /* Remap */
865
866         content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
867
868         /* Process */
869
870         if (_audio_processor) {
871                 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
872         }
873
874         /* Push */
875
876         _audio_merger.push (content_audio.audio, time);
877         DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
878         _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
879 }
880
881 void
882 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
883 {
884         shared_ptr<Piece> piece = wp.lock ();
885         shared_ptr<const TextContent> text = wc.lock ();
886         if (!piece || !text) {
887                 return;
888         }
889
890         /* Apply content's subtitle offsets */
891         subtitle.sub.rectangle.x += text->x_offset ();
892         subtitle.sub.rectangle.y += text->y_offset ();
893
894         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
895         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
896         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
897
898         /* Apply content's subtitle scale */
899         subtitle.sub.rectangle.width *= text->x_scale ();
900         subtitle.sub.rectangle.height *= text->y_scale ();
901
902         PlayerText ps;
903         ps.bitmap.push_back (subtitle.sub);
904         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
905
906         _active_texts[subtitle.type()].add_from (wc, ps, from);
907 }
908
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
		/* The piece or its text content has gone away; nothing to do */
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	/* Discard subtitles which start after the end of the content's period */
	if (from > piece->content->end()) {
		return;
	}

	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		/* Apply the content's position offsets */
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   this multiplies size by max(xs, ys), so e.g. if
		   xs = ys = 0.5 the size is halved.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* The `in' time is in the DCP's timeline, with millisecond resolution */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[subtitle.type()].add_from (wc, ps, from);
}
952
953 void
954 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to, TextType type)
955 {
956         if (!_active_texts[type].have (wc)) {
957                 return;
958         }
959
960         shared_ptr<Piece> piece = wp.lock ();
961         shared_ptr<const TextContent> text = wc.lock ();
962         if (!piece || !text) {
963                 return;
964         }
965
966         DCPTime const dcp_to = content_time_to_dcp (piece, to);
967
968         if (dcp_to > piece->content->end()) {
969                 return;
970         }
971
972         pair<PlayerText, DCPTime> from = _active_texts[type].add_to (wc, dcp_to);
973
974         bool const always = (type == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
975         if (text->use() && !always && !text->burn()) {
976                 Text (from.first, type, DCPTimePeriod (from.second, dcp_to));
977         }
978 }
979
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	/* Throw away all buffered state which relates to the old position */

	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();
	}

	/* Seek each piece's decoder, or mark the piece done if it lies wholly before the new position */
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content */
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end()) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	/* After an accurate seek we know exactly what should come next; after an
	   inaccurate one we must wait and see what the decoders actually give us.
	*/
	if (accurate) {
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	} else {
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Frames from before the seek must not be used for gap-filling */
	_last_video.clear ();
}
1035
1036 void
1037 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1038 {
1039         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1040            player before the video that requires them.
1041         */
1042         _delay.push_back (make_pair (pv, time));
1043
1044         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1045                 _last_video_time = time + one_video_frame();
1046         }
1047         _last_video_eyes = increment_eyes (pv->eyes());
1048
1049         if (_delay.size() < 3) {
1050                 return;
1051         }
1052
1053         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1054         _delay.pop_front();
1055         do_emit_video (to_do.first, to_do.second);
1056 }
1057
1058 void
1059 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1060 {
1061         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1062                 for (int i = 0; i < TEXT_COUNT; ++i) {
1063                         _active_texts[i].clear_before (time);
1064                 }
1065         }
1066
1067         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1068         if (subtitles) {
1069                 pv->set_text (subtitles.get ());
1070         }
1071
1072         Video (pv, time);
1073 }
1074
1075 void
1076 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1077 {
1078         /* Log if the assert below is about to fail */
1079         if (_last_audio_time && time != *_last_audio_time) {
1080                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1081         }
1082
1083         /* This audio must follow on from the previous */
1084         DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1085         Audio (data, time);
1086         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1087 }
1088
1089 void
1090 Player::fill_audio (DCPTimePeriod period)
1091 {
1092         if (period.from == period.to) {
1093                 return;
1094         }
1095
1096         DCPOMATIC_ASSERT (period.from < period.to);
1097
1098         DCPTime t = period.from;
1099         while (t < period.to) {
1100                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1101                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1102                 if (samples) {
1103                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1104                         silence->make_silent ();
1105                         emit_audio (silence, t);
1106                 }
1107                 t += block;
1108         }
1109 }
1110
DCPTime
Player::one_video_frame () const
{
	/* The duration of a single video frame at the film's video frame rate */
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}
1116
1117 pair<shared_ptr<AudioBuffers>, DCPTime>
1118 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1119 {
1120         DCPTime const discard_time = discard_to - time;
1121         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1122         Frame remaining_frames = audio->frames() - discard_frames;
1123         if (remaining_frames <= 0) {
1124                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1125         }
1126         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1127         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1128         return make_pair(cut, time + discard_time);
1129 }
1130
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	/* Announce that this property is about to change */
	Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change; release our lock before announcing the cancellation */
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		/* Rebuild the pieces so that decoders pick up the new reduction */
		setup_pieces_unlocked ();
	}

	/* Announce completion outside the lock */
	Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1151
1152 optional<DCPTime>
1153 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1154 {
1155         boost::mutex::scoped_lock lm (_mutex);
1156
1157         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1158                 if (i->content == content) {
1159                         return content_time_to_dcp (i, t);
1160                 }
1161         }
1162
1163         /* We couldn't find this content; perhaps things are being changed over */
1164         return optional<DCPTime>();
1165 }