Basics of multiple captions per content so that DCPContent can
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_text.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "caption_content.h"
44 #include "caption_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
50 #include "shuffler.h"
51 #include <dcp/reel.h>
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
57 #include <stdint.h>
58 #include <algorithm>
59 #include <iostream>
60
61 #include "i18n.h"
62
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
64
65 using std::list;
66 using std::cout;
67 using std::min;
68 using std::max;
69 using std::min;
70 using std::vector;
71 using std::pair;
72 using std::map;
73 using std::make_pair;
74 using std::copy;
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
80
/* Property codes passed to the Changed signal so that observers can tell
   what aspect of the player's output has been invalidated. */
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
86
/** Construct a Player for a given film and playlist.
 *  Subscribes to change notifications from both, sizes the video container
 *  to the film's frame size, picks up the film's audio processor and then
 *  seeks (accurately) to the start.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _have_valid_pieces (false)
	, _ignore_video (false)
	, _ignore_caption (false)
	, _fast (false)
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
	_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
	_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
	set_video_container_size (_film->frame_size ());

	/* Pick up any audio processor the film is already configured with */
	film_changed (Film::AUDIO_PROCESSOR);

	/* `true' => accurate seek to the very start */
	seek (DCPTime (), true);
}
107
Player::~Player ()
{
	/* _shuffler is a raw owning pointer managed by setup_pieces() */
	delete _shuffler;
}
112
/** Rebuild _pieces from the playlist: create a decoder for each piece of
 *  content, wire its emission signals to our handlers and reset playback
 *  state (black/silence fillers and last-emitted times) to the start.
 */
void
Player::setup_pieces ()
{
	_pieces.clear ();

	/* Recreate the shuffler; it re-orders 3D L/R video that arrives out of sequence
	   and feeds it back into Player::video.
	*/
	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

		if (!i->paths_valid ()) {
			/* Content whose files are missing is skipped entirely */
			continue;
		}

		shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
		FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());

		if (!decoder) {
			/* Not something that we can decode; e.g. Atmos content */
			continue;
		}

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (_ignore_caption) {
			BOOST_FOREACH (shared_ptr<CaptionDecoder> i, decoder->caption) {
				i->set_ignore (true);
			}
		}

		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Each content may carry several caption decoders; connect start/stop
		   signals for bitmap and plain-text captions on every one.
		*/
		list<shared_ptr<CaptionDecoder> >::const_iterator j = decoder->caption.begin();

		while (j != decoder->caption.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const CaptionContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const CaptionContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const CaptionContent>((*j)->content()), _1, _2)
				);

			++j;
		}
	}

	/* Per-audio-stream bookkeeping used by pass() to decide how much audio can be pulled */
	_stream_states.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Periods of the film not covered by video / audio content; filled with black / silence */
	_black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
	_silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));

	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();
	_have_valid_pieces = true;
}
204
/** Handler for a change to some property of a piece of content in the playlist.
 *  Properties in the first group invalidate our pieces (decoders must be
 *  rebuilt); those in the second only change how existing decoded data is
 *  presented, so we just re-emit Changed.
 */
void
Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
{
	shared_ptr<Content> c = w.lock ();
	if (!c) {
		/* Content has already been destroyed; nothing to do */
		return;
	}

	if (
		property == ContentProperty::POSITION ||
		property == ContentProperty::LENGTH ||
		property == ContentProperty::TRIM_START ||
		property == ContentProperty::TRIM_END ||
		property == ContentProperty::PATH ||
		property == VideoContentProperty::FRAME_TYPE ||
		property == VideoContentProperty::COLOUR_CONVERSION ||
		property == AudioContentProperty::STREAMS ||
		property == DCPContentProperty::NEEDS_ASSETS ||
		property == DCPContentProperty::NEEDS_KDM ||
		property == CaptionContentProperty::COLOUR ||
		property == CaptionContentProperty::EFFECT ||
		property == CaptionContentProperty::EFFECT_COLOUR ||
		property == FFmpegContentProperty::SUBTITLE_STREAM ||
		property == FFmpegContentProperty::FILTERS
		) {

		/* These require the decoders/pieces to be rebuilt on the next pass/seek */
		_have_valid_pieces = false;
		Changed (property, frequent);

	} else if (
		property == CaptionContentProperty::LINE_SPACING ||
		property == CaptionContentProperty::OUTLINE_WIDTH ||
		property == CaptionContentProperty::Y_SCALE ||
		property == CaptionContentProperty::FADE_IN ||
		property == CaptionContentProperty::FADE_OUT ||
		property == ContentProperty::VIDEO_FRAME_RATE ||
		property == CaptionContentProperty::USE ||
		property == CaptionContentProperty::X_OFFSET ||
		property == CaptionContentProperty::Y_OFFSET ||
		property == CaptionContentProperty::X_SCALE ||
		property == CaptionContentProperty::FONTS ||
		property == CaptionContentProperty::TYPE ||
		property == VideoContentProperty::CROP ||
		property == VideoContentProperty::SCALE ||
		property == VideoContentProperty::FADE_IN ||
		property == VideoContentProperty::FADE_OUT
		) {

		/* Presentation-only changes; existing pieces remain valid */
		Changed (property, frequent);
	}
}
256
257 void
258 Player::set_video_container_size (dcp::Size s)
259 {
260         if (s == _video_container_size) {
261                 return;
262         }
263
264         _video_container_size = s;
265
266         _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
267         _black_image->make_black ();
268
269         Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
270 }
271
/** Handler for wholesale changes to the playlist: pieces must be rebuilt
 *  and observers told that our output may now be different.
 */
void
Player::playlist_changed ()
{
	_have_valid_pieces = false;
	Changed (PlayerProperty::PLAYLIST, false);
}
278
279 void
280 Player::film_changed (Film::Property p)
281 {
282         /* Here we should notice Film properties that affect our output, and
283            alert listeners that our output now would be different to how it was
284            last time we were run.
285         */
286
287         if (p == Film::CONTAINER) {
288                 Changed (PlayerProperty::FILM_CONTAINER, false);
289         } else if (p == Film::VIDEO_FRAME_RATE) {
290                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
291                    so we need new pieces here.
292                 */
293                 _have_valid_pieces = false;
294                 Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
295         } else if (p == Film::AUDIO_PROCESSOR) {
296                 if (_film->audio_processor ()) {
297                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
298                 }
299         } else if (p == Film::AUDIO_CHANNELS) {
300                 _audio_merger.clear ();
301         }
302 }
303
304 list<PositionImage>
305 Player::transform_bitmap_captions (list<BitmapCaption> subs) const
306 {
307         list<PositionImage> all;
308
309         for (list<BitmapCaption>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
310                 if (!i->image) {
311                         continue;
312                 }
313
314                 /* We will scale the subtitle up to fit _video_container_size */
315                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
316
317                 all.push_back (
318                         PositionImage (
319                                 i->image->scale (
320                                         scaled_size,
321                                         dcp::YUV_TO_RGB_REC601,
322                                         i->image->pixel_format (),
323                                         true,
324                                         _fast
325                                         ),
326                                 Position<int> (
327                                         lrint (_video_container_size.width * i->rectangle.x),
328                                         lrint (_video_container_size.height * i->rectangle.y)
329                                         )
330                                 )
331                         );
332         }
333
334         return all;
335 }
336
/** @param eyes which eye(s) the frame is for.
 *  @return a new completely-black PlayerVideo frame sized to the current
 *  video container, with no crop, fade, content or frame association.
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return shared_ptr<PlayerVideo> (
		new PlayerVideo (
			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
			Crop (),
			optional<double> (),  /* no fade */
			_video_container_size,
			_video_container_size,
			eyes,
			PART_WHOLE,
			PresetColourConversion::all().front().conversion,
			boost::weak_ptr<Content>(),  /* no associated content */
			boost::optional<Frame>()  /* no associated content frame */
		)
	);
}
355
/** Convert a DCP time to a frame index within a piece of video content,
 *  accounting for the content's position, trims and frame-rate change.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
372
373 DCPTime
374 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
375 {
376         /* See comment in dcp_to_content_video */
377         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
378         return d + piece->content->position();
379 }
380
/** Convert a DCP time to an audio frame index within a piece of content,
 *  at the film's (resampled) audio rate.
 */
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
}
389
/** Convert an audio frame index (at the film's audio rate) within a piece
 *  of content to DCP time; the inverse of dcp_to_resampled_audio.
 */
DCPTime
Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
	/* See comment in dcp_to_content_video */
	return DCPTime::from_frames (f, _film->audio_frame_rate())
		- DCPTime (piece->content->trim_start(), piece->frc)
		+ piece->content->position();
}
398
/** Convert a DCP time to a ContentTime within a piece, clamped to the
 *  piece's trimmed length and never before the start of its trim.
 */
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
406
/** Convert a ContentTime within a piece to DCP time, clamped so it is
 *  never negative; the inverse of dcp_to_content_time.
 */
DCPTime
Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
{
	return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
}
412
413 list<shared_ptr<Font> >
414 Player::get_subtitle_fonts ()
415 {
416         if (!_have_valid_pieces) {
417                 setup_pieces ();
418         }
419
420         list<shared_ptr<Font> > fonts;
421         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
422                 BOOST_FOREACH (shared_ptr<CaptionContent> j, i->content->caption) {
423                         /* XXX: things may go wrong if there are duplicate font IDs
424                            with different font files.
425                         */
426                         list<shared_ptr<Font> > f = j->fonts ();
427                         copy (f.begin(), f.end(), back_inserter (fonts));
428                 }
429         }
430
431         return fonts;
432 }
433
/** Set this player never to produce any video data.
 *  Takes effect when pieces are (re)built.
 */
void
Player::set_ignore_video ()
{
	_ignore_video = true;
}
440
/** Set this player never to produce any caption data.
 *  Takes effect when pieces are (re)built.
 */
void
Player::set_ignore_caption ()
{
	_ignore_caption = true;
}
446
/** Set a type of caption that this player should always burn into the image,
 *  regardless of the content settings.
 *  @param type type of captions to burn.
 */
void
Player::set_always_burn_captions (CaptionType type)
{
	_always_burn_captions = type;
}
456
/** Sets up the player to be faster, possibly at the expense of quality.
 *  Invalidates existing pieces so decoders are re-created in fast mode.
 */
void
Player::set_fast ()
{
	_fast = true;
	_have_valid_pieces = false;
}
464
/** Tell the player to decode and play content that is referenced from
 *  existing DCPs (rather than skipping it).  Invalidates existing pieces.
 */
void
Player::set_play_referenced ()
{
	_play_referenced = true;
	_have_valid_pieces = false;
}
471
472 list<ReferencedReelAsset>
473 Player::get_reel_assets ()
474 {
475         list<ReferencedReelAsset> a;
476
477         BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
478                 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
479                 if (!j) {
480                         continue;
481                 }
482
483                 scoped_ptr<DCPDecoder> decoder;
484                 try {
485                         decoder.reset (new DCPDecoder (j, _film->log(), false));
486                 } catch (...) {
487                         return a;
488                 }
489
490                 int64_t offset = 0;
491                 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
492
493                         DCPOMATIC_ASSERT (j->video_frame_rate ());
494                         double const cfr = j->video_frame_rate().get();
495                         Frame const trim_start = j->trim_start().frames_round (cfr);
496                         Frame const trim_end = j->trim_end().frames_round (cfr);
497                         int const ffr = _film->video_frame_rate ();
498
499                         DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
500                         if (j->reference_video ()) {
501                                 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
502                                 DCPOMATIC_ASSERT (ra);
503                                 ra->set_entry_point (ra->entry_point() + trim_start);
504                                 ra->set_duration (ra->duration() - trim_start - trim_end);
505                                 a.push_back (
506                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
507                                         );
508                         }
509
510                         if (j->reference_audio ()) {
511                                 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
512                                 DCPOMATIC_ASSERT (ra);
513                                 ra->set_entry_point (ra->entry_point() + trim_start);
514                                 ra->set_duration (ra->duration() - trim_start - trim_end);
515                                 a.push_back (
516                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
517                                         );
518                         }
519
520                         if (j->reference_caption (CAPTION_OPEN)) {
521                                 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
522                                 DCPOMATIC_ASSERT (ra);
523                                 ra->set_entry_point (ra->entry_point() + trim_start);
524                                 ra->set_duration (ra->duration() - trim_start - trim_end);
525                                 a.push_back (
526                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
527                                         );
528                         }
529
530                         if (j->reference_caption (CAPTION_CLOSED)) {
531                                 shared_ptr<dcp::ReelAsset> ra = k->closed_caption ();
532                                 DCPOMATIC_ASSERT (ra);
533                                 ra->set_entry_point (ra->entry_point() + trim_start);
534                                 ra->set_duration (ra->duration() - trim_start - trim_end);
535                                 a.push_back (
536                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
537                                         );
538                         }
539
540                         /* Assume that main picture duration is the length of the reel */
541                         offset += k->main_picture()->duration ();
542                 }
543         }
544
545         return a;
546 }
547
/** Make the most-behind decoder (or black/silence filler) emit some data,
 *  then push out any audio which is definitely complete.
 *  @return true when there is nothing left to emit (playback finished).
 */
bool
Player::pass ()
{
	if (!_have_valid_pieces) {
		setup_pieces ();
	}

	if (_playlist->length() == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end()) {
			/* This piece's decoder has gone past the end of the content's period */
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with captions so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->caption.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* Decide what to emit next: real content, filler black/silence, or nothing at all */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
		earliest_content->done = earliest_content->decoder->pass ();
		break;
	case BLACK:
		/* Emit one black frame and advance by a frame */
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   this silence.  Bodge the start time of the silence to fix it.  I'm
			   not sure if this is the right solution --- maybe the last thing should
			   be padded `forward' rather than this thing padding `back'.
			*/
			period.from = min(period.from, *_last_audio_time);
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _film->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Nothing left; flush any video still queued in the shuffler and the delay line */
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
681
/** Gather all captions (bitmap and text, of every caption type) which should
 *  be burnt into the video frame at a given time, and merge them into a
 *  single positioned image.
 *  @param time DCP time of the frame.
 *  @return merged caption image, or nothing if there are no captions to burn.
 */
optional<PositionImage>
Player::captions_for_frame (DCPTime time) const
{
	list<PositionImage> captions;

	int const vfr = _film->video_frame_rate();

	for (int i = 0; i < CAPTION_COUNT; ++i) {
		/* Captions of this type are forced on if _always_burn_captions selects it */
		bool const always = _always_burn_captions && *_always_burn_captions == i;
		BOOST_FOREACH (
			PlayerCaption j,
			_active_captions[i].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), always)
			) {

			/* Image subtitles */
			list<PositionImage> c = transform_bitmap_captions (j.image);
			copy (c.begin(), c.end(), back_inserter (captions));

			/* Text subtitles (rendered to an image) */
			if (!j.text.empty ()) {
				list<PositionImage> s = render_text (j.text, j.fonts, _video_container_size, time, vfr);
				copy (s.begin(), s.end(), back_inserter (captions));
			}
		}
	}

	if (captions.empty ()) {
		return optional<PositionImage> ();
	}

	return merge (captions);
}
714
/** Handle a video frame coming from a piece's decoder.
 *  Discards frames outside the piece's period / before the last accurate seek,
 *  fills any gap since the last emitted frame (with a repeat of the previous
 *  frame or with black), then emits this frame (repeated as the frame-rate
 *  change requires).
 *  @param wp Piece that the video came from.
 *  @param video The decoded video frame.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
	if (frc.skip && (video.frame % 2) == 1) {
		/* This frame-rate conversion drops every other content frame */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end());

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());
		/* Last frame we emitted for this piece, if any; used to repeat rather than emit black */
		LastVideoMap::const_iterator last = _last_video.find (wp);
		if (_film->three_d()) {
			/* In 3D we must fill eye-by-eye, continuing from whichever eye was emitted last */
			DCPTime j = fill_from;
			Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
			if (eyes == EYES_BOTH) {
				eyes = EYES_LEFT;
			}
			/* Keep going until we reach fill_to AND the next eye to emit matches this frame's eye */
			while (j < fill_to || eyes != video.eyes) {
				if (last != _last_video.end()) {
					/* Repeat the previous frame, re-labelled with the eye we are filling */
					shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
					copy->set_eyes (eyes);
					emit_video (copy, j);
				} else {
					emit_video (black_player_video_frame(eyes), j);
				}
				/* Time only advances once both eyes of a frame have been emitted */
				if (eyes == EYES_RIGHT) {
					j += one_video_frame();
				}
				eyes = increment_eyes (eyes);
			}
		} else {
			for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
				if (last != _last_video.end()) {
					emit_video (last->second, j);
				} else {
					emit_video (black_player_video_frame(EYES_BOTH), j);
				}
			}
		}
	}

	/* Remember this frame (cropped/faded/scaled) so it can be repeated for future gap-fills */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content,
			video.frame
			)
		);

	/* Emit the frame, repeated as required by the frame-rate change, but never past the content's end */
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end()) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
802
/** Handle a block of audio coming from a piece's decoder.
 *  Trims the block to the content's period, applies gain, remaps channels,
 *  runs the audio processor (if any) and pushes the result into the merger.
 *  @param wp Piece that the audio came from.
 *  @param stream Stream within the piece that the audio belongs to.
 *  @param content_audio The decoded audio block (must be non-empty).
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		/* Starts early: cut off the part before the content's position */
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end()) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end()) {
		/* Overruns the end: keep only the frames up to the content's end */
		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Work on a copy so we don't modify the decoder's buffers */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	/* Record where this stream's pushed audio now ends, so passes know how far they have got */
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
869
870 void
871 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const CaptionContent> wc, ContentBitmapCaption subtitle)
872 {
873         shared_ptr<Piece> piece = wp.lock ();
874         shared_ptr<const CaptionContent> caption = wc.lock ();
875         if (!piece || !caption) {
876                 return;
877         }
878
879         /* Apply content's subtitle offsets */
880         subtitle.sub.rectangle.x += caption->x_offset ();
881         subtitle.sub.rectangle.y += caption->y_offset ();
882
883         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
884         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((caption->x_scale() - 1) / 2);
885         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((caption->y_scale() - 1) / 2);
886
887         /* Apply content's subtitle scale */
888         subtitle.sub.rectangle.width *= caption->x_scale ();
889         subtitle.sub.rectangle.height *= caption->y_scale ();
890
891         PlayerCaption ps;
892         ps.image.push_back (subtitle.sub);
893         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
894
895         _active_captions[subtitle.type()].add_from (wc, ps, from);
896 }
897
898 void
899 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const CaptionContent> wc, ContentTextCaption subtitle)
900 {
901         shared_ptr<Piece> piece = wp.lock ();
902         shared_ptr<const CaptionContent> caption = wc.lock ();
903         if (!piece || !caption) {
904                 return;
905         }
906
907         PlayerCaption ps;
908         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
909
910         if (from > piece->content->end()) {
911                 return;
912         }
913
914         BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
915                 s.set_h_position (s.h_position() + caption->x_offset ());
916                 s.set_v_position (s.v_position() + caption->y_offset ());
917                 float const xs = caption->x_scale();
918                 float const ys = caption->y_scale();
919                 float size = s.size();
920
921                 /* Adjust size to express the common part of the scaling;
922                    e.g. if xs = ys = 0.5 we scale size by 2.
923                 */
924                 if (xs > 1e-5 && ys > 1e-5) {
925                         size *= 1 / min (1 / xs, 1 / ys);
926                 }
927                 s.set_size (size);
928
929                 /* Then express aspect ratio changes */
930                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
931                         s.set_aspect_adjust (xs / ys);
932                 }
933
934                 s.set_in (dcp::Time(from.seconds(), 1000));
935                 ps.text.push_back (TextCaption (s, caption->outline_width()));
936                 ps.add_fonts (caption->fonts ());
937         }
938
939         _active_captions[subtitle.type()].add_from (wc, ps, from);
940 }
941
942 void
943 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const CaptionContent> wc, ContentTime to, CaptionType type)
944 {
945         if (!_active_captions[type].have (wc)) {
946                 return;
947         }
948
949         shared_ptr<Piece> piece = wp.lock ();
950         shared_ptr<const CaptionContent> caption = wc.lock ();
951         if (!piece || !caption) {
952                 return;
953         }
954
955         DCPTime const dcp_to = content_time_to_dcp (piece, to);
956
957         if (dcp_to > piece->content->end()) {
958                 return;
959         }
960
961         pair<PlayerCaption, DCPTime> from = _active_captions[type].add_to (wc, dcp_to);
962
963         bool const always = _always_burn_captions && *_always_burn_captions == type;
964         if (caption->use() && !always && !caption->burn()) {
965                 Caption (from.first, type, DCPTimePeriod (from.second, dcp_to));
966         }
967 }
968
969 void
970 Player::seek (DCPTime time, bool accurate)
971 {
972         if (!_have_valid_pieces) {
973                 setup_pieces ();
974         }
975
976         if (_shuffler) {
977                 _shuffler->clear ();
978         }
979
980         _delay.clear ();
981
982         if (_audio_processor) {
983                 _audio_processor->flush ();
984         }
985
986         _audio_merger.clear ();
987         for (int i = 0; i < CAPTION_COUNT; ++i) {
988                 _active_captions[i].clear ();
989         }
990
991         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
992                 if (time < i->content->position()) {
993                         /* Before; seek to the start of the content */
994                         i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
995                         i->done = false;
996                 } else if (i->content->position() <= time && time < i->content->end()) {
997                         /* During; seek to position */
998                         i->decoder->seek (dcp_to_content_time (i, time), accurate);
999                         i->done = false;
1000                 } else {
1001                         /* After; this piece is done */
1002                         i->done = true;
1003                 }
1004         }
1005
1006         if (accurate) {
1007                 _last_video_time = time;
1008                 _last_video_eyes = EYES_LEFT;
1009                 _last_audio_time = time;
1010         } else {
1011                 _last_video_time = optional<DCPTime>();
1012                 _last_video_eyes = optional<Eyes>();
1013                 _last_audio_time = optional<DCPTime>();
1014         }
1015
1016         _black.set_position (time);
1017         _silent.set_position (time);
1018
1019         _last_video.clear ();
1020 }
1021
1022 void
1023 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1024 {
1025         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1026            player before the video that requires them.
1027         */
1028         _delay.push_back (make_pair (pv, time));
1029
1030         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1031                 _last_video_time = time + one_video_frame();
1032         }
1033         _last_video_eyes = increment_eyes (pv->eyes());
1034
1035         if (_delay.size() < 3) {
1036                 return;
1037         }
1038
1039         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1040         _delay.pop_front();
1041         do_emit_video (to_do.first, to_do.second);
1042 }
1043
1044 void
1045 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1046 {
1047         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1048                 for (int i = 0; i < CAPTION_COUNT; ++i) {
1049                         _active_captions[i].clear_before (time);
1050                 }
1051         }
1052
1053         optional<PositionImage> captions = captions_for_frame (time);
1054         if (captions) {
1055                 pv->set_caption (captions.get ());
1056         }
1057
1058         Video (pv, time);
1059 }
1060
1061 void
1062 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1063 {
1064         /* Log if the assert below is about to fail */
1065         if (_last_audio_time && time != *_last_audio_time) {
1066                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1067         }
1068
1069         /* This audio must follow on from the previous */
1070         DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1071         Audio (data, time);
1072         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1073 }
1074
1075 void
1076 Player::fill_audio (DCPTimePeriod period)
1077 {
1078         if (period.from == period.to) {
1079                 return;
1080         }
1081
1082         DCPOMATIC_ASSERT (period.from < period.to);
1083
1084         DCPTime t = period.from;
1085         while (t < period.to) {
1086                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1087                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1088                 if (samples) {
1089                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1090                         silence->make_silent ();
1091                         emit_audio (silence, t);
1092                 }
1093                 t += block;
1094         }
1095 }
1096
1097 DCPTime
1098 Player::one_video_frame () const
1099 {
1100         return DCPTime::from_frames (1, _film->video_frame_rate ());
1101 }
1102
1103 pair<shared_ptr<AudioBuffers>, DCPTime>
1104 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1105 {
1106         DCPTime const discard_time = discard_to - time;
1107         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1108         Frame remaining_frames = audio->frames() - discard_frames;
1109         if (remaining_frames <= 0) {
1110                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1111         }
1112         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1113         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1114         return make_pair(cut, time + discard_time);
1115 }
1116
1117 void
1118 Player::set_dcp_decode_reduction (optional<int> reduction)
1119 {
1120         if (reduction == _dcp_decode_reduction) {
1121                 return;
1122         }
1123
1124         _dcp_decode_reduction = reduction;
1125         _have_valid_pieces = false;
1126         Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);
1127 }
1128
1129 DCPTime
1130 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1131 {
1132         if (_have_valid_pieces) {
1133                 setup_pieces ();
1134         }
1135
1136         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1137                 if (i->content == content) {
1138                         return content_time_to_dcp (i, t);
1139                 }
1140         }
1141
1142         DCPOMATIC_ASSERT (false);
1143         return DCPTime ();
1144 }