df58ed223af0ea043322f5a2e42262dc0466656a
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_subtitles.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "compose.hpp"
51 #include "shuffler.h"
52 #include <dcp/reel.h>
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <boost/foreach.hpp>
57 #include <stdint.h>
58 #include <algorithm>
59 #include <iostream>
60
61 #include "i18n.h"
62
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
64
65 using std::list;
66 using std::cout;
67 using std::min;
68 using std::max;
69 using std::min;
70 using std::vector;
71 using std::pair;
72 using std::map;
73 using std::make_pair;
74 using std::copy;
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
80
/* Property codes passed to the Changed signal when some aspect of the player
   itself (rather than a single piece of content) changes.  Values are arbitrary
   but must not clash with the content property codes checked in
   playlist_content_changed().
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
86
/** Construct a Player for a given film and playlist.
 *  @param film Film whose global settings (frame size, rates, audio processor) we obey.
 *  @param playlist Playlist of content to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _have_valid_pieces (false)
	, _ignore_video (false)
	, _ignore_subtitle (false)
	, _always_burn_subtitles (false)
	, _fast (false)
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	/* Watch for changes which may invalidate our pieces or alter our output */
	_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
	_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
	_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
	/* Start with the film's own frame size as our video container size */
	set_video_container_size (_film->frame_size ());
	/* Pick up the film's audio processor, if it has one */
	film_changed (Film::AUDIO_PROCESSOR);
	/* Accurate seek to the start so that we are ready to pass() */
	seek (DCPTime (), true);
}
108
Player::~Player ()
{
	/* _shuffler is the only raw-owned resource; everything else is shared_ptr-managed */
	delete _shuffler;
}
113
/** Rebuild _pieces from the playlist: make a decoder for each piece of content
 *  that we can decode, connect its output signals to our handlers, and reset
 *  the black/silence fillers and emission state.  Sets _have_valid_pieces.
 */
void
Player::setup_pieces ()
{
	_pieces.clear ();

	/* Fresh Shuffler for re-ordering 3D L/R frames; any pending state in the old one is discarded */
	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

		if (!i->paths_valid ()) {
			/* Content's files are missing; skip it */
			continue;
		}

		shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
		FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());

		if (!decoder) {
			/* Not something that we can decode; e.g. Atmos content */
			continue;
		}

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->subtitle && _ignore_subtitle) {
			decoder->subtitle->set_ignore (true);
		}

		/* DCP content needs to know whether referenced assets should actually be decoded */
		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		if (decoder->subtitle) {
			decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
			decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
			decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
		}
	}

	/* Record each audio stream's starting position so pass() can tell how far it has pushed */
	_stream_states.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Fillers for the periods where no content supplies video (black) or audio (silence) */
	_black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
	_silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));

	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();
	_have_valid_pieces = true;
}
193
/** Handle a change to some content property in the playlist.
 *  @param w Content which changed.
 *  @param property Code of the property that changed.
 *  @param frequent true if this change is likely to happen many times in quick succession.
 */
void
Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
{
	shared_ptr<Content> c = w.lock ();
	if (!c) {
		/* The content has already been destroyed; nothing to do */
		return;
	}

	if (
		/* These properties affect how pieces are set up, so require a rebuild ... */
		property == ContentProperty::POSITION ||
		property == ContentProperty::LENGTH ||
		property == ContentProperty::TRIM_START ||
		property == ContentProperty::TRIM_END ||
		property == ContentProperty::PATH ||
		property == VideoContentProperty::FRAME_TYPE ||
		property == VideoContentProperty::COLOUR_CONVERSION ||
		property == AudioContentProperty::STREAMS ||
		property == DCPContentProperty::NEEDS_ASSETS ||
		property == DCPContentProperty::NEEDS_KDM ||
		property == SubtitleContentProperty::COLOUR ||
		property == SubtitleContentProperty::EFFECT ||
		property == SubtitleContentProperty::EFFECT_COLOUR ||
		property == FFmpegContentProperty::SUBTITLE_STREAM ||
		property == FFmpegContentProperty::FILTERS
		) {

		_have_valid_pieces = false;
		Changed (property, frequent);

	} else if (
		/* ... while these only change our output, so the existing pieces remain valid */
		property == SubtitleContentProperty::LINE_SPACING ||
		property == SubtitleContentProperty::OUTLINE_WIDTH ||
		property == SubtitleContentProperty::Y_SCALE ||
		property == SubtitleContentProperty::FADE_IN ||
		property == SubtitleContentProperty::FADE_OUT ||
		property == ContentProperty::VIDEO_FRAME_RATE ||
		property == SubtitleContentProperty::USE ||
		property == SubtitleContentProperty::X_OFFSET ||
		property == SubtitleContentProperty::Y_OFFSET ||
		property == SubtitleContentProperty::X_SCALE ||
		property == SubtitleContentProperty::FONTS ||
		property == VideoContentProperty::CROP ||
		property == VideoContentProperty::SCALE ||
		property == VideoContentProperty::FADE_IN ||
		property == VideoContentProperty::FADE_OUT
		) {

		Changed (property, frequent);
	}
}
244
/** Set the size of the "container" into which all emitted video is placed.
 *  Re-creates the cached black frame to match and emits Changed, unless the
 *  size is the same as before (in which case this is a no-op).
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	if (s == _video_container_size) {
		/* No change; avoid rebuilding the black frame and signalling */
		return;
	}

	_video_container_size = s;

	/* Cached black frame used whenever we need to fill with black at this size */
	_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
	_black_image->make_black ();

	Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
259
/** Handle a wholesale change to the playlist: our pieces are now stale and
 *  must be rebuilt on the next pass()/seek().
 */
void
Player::playlist_changed ()
{
	_have_valid_pieces = false;
	Changed (PlayerProperty::PLAYLIST, false);
}
266
267 void
268 Player::film_changed (Film::Property p)
269 {
270         /* Here we should notice Film properties that affect our output, and
271            alert listeners that our output now would be different to how it was
272            last time we were run.
273         */
274
275         if (p == Film::CONTAINER) {
276                 Changed (PlayerProperty::FILM_CONTAINER, false);
277         } else if (p == Film::VIDEO_FRAME_RATE) {
278                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
279                    so we need new pieces here.
280                 */
281                 _have_valid_pieces = false;
282                 Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
283         } else if (p == Film::AUDIO_PROCESSOR) {
284                 if (_film->audio_processor ()) {
285                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
286                 }
287         } else if (p == Film::AUDIO_CHANNELS) {
288                 _audio_merger.clear ();
289         }
290 }
291
292 list<PositionImage>
293 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
294 {
295         list<PositionImage> all;
296
297         for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
298                 if (!i->image) {
299                         continue;
300                 }
301
302                 /* We will scale the subtitle up to fit _video_container_size */
303                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
304
305                 all.push_back (
306                         PositionImage (
307                                 i->image->scale (
308                                         scaled_size,
309                                         dcp::YUV_TO_RGB_REC601,
310                                         i->image->pixel_format (),
311                                         true,
312                                         _fast
313                                         ),
314                                 Position<int> (
315                                         lrint (_video_container_size.width * i->rectangle.x),
316                                         lrint (_video_container_size.height * i->rectangle.y)
317                                         )
318                                 )
319                         );
320         }
321
322         return all;
323 }
324
/** @return a new PlayerVideo containing our cached black frame at the current
 *  container size, for the given eye(s).
 *  @param eyes Eye(s) that the frame is for (e.g. EYES_BOTH in 2D).
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return shared_ptr<PlayerVideo> (
		new PlayerVideo (
			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
			Crop (),
			optional<double> (),
			_video_container_size,
			_video_container_size,
			eyes,
			PART_WHOLE,
			/* Colour conversion is irrelevant for an all-black frame; any preset will do */
			PresetColourConversion::all().front().conversion,
			/* No originating content or frame number for synthetic black */
			boost::weak_ptr<Content>(),
			boost::optional<Frame>()
		)
	);
}
343
/** Convert a DCP time to a video frame index within a piece of content.
 *  @param piece Piece of content.
 *  @param t Time in the DCP.
 *  @return Video frame index within the content, clamped to the content's period.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
360
361 DCPTime
362 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
363 {
364         /* See comment in dcp_to_content_video */
365         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
366         return d + piece->content->position();
367 }
368
369 Frame
370 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
371 {
372         DCPTime s = t - piece->content->position ();
373         s = min (piece->content->length_after_trim(), s);
374         /* See notes in dcp_to_content_video */
375         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
376 }
377
378 DCPTime
379 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
380 {
381         /* See comment in dcp_to_content_video */
382         return DCPTime::from_frames (f, _film->audio_frame_rate())
383                 - DCPTime (piece->content->trim_start(), piece->frc)
384                 + piece->content->position();
385 }
386
387 ContentTime
388 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
389 {
390         DCPTime s = t - piece->content->position ();
391         s = min (piece->content->length_after_trim(), s);
392         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
393 }
394
395 DCPTime
396 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
397 {
398         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
399 }
400
401 list<shared_ptr<Font> >
402 Player::get_subtitle_fonts ()
403 {
404         if (!_have_valid_pieces) {
405                 setup_pieces ();
406         }
407
408         list<shared_ptr<Font> > fonts;
409         BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
410                 if (p->content->subtitle) {
411                         /* XXX: things may go wrong if there are duplicate font IDs
412                            with different font files.
413                         */
414                         list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
415                         copy (f.begin(), f.end(), back_inserter (fonts));
416                 }
417         }
418
419         return fonts;
420 }
421
/** Set this player never to produce any video data.
 *  Takes effect when pieces are (re)built; there is no way to undo it.
 */
void
Player::set_ignore_video ()
{
	_ignore_video = true;
}
428
/** Set this player never to produce any subtitle data.
 *  Takes effect when pieces are (re)built; there is no way to undo it.
 */
void
Player::set_ignore_subtitle ()
{
	_ignore_subtitle = true;
}
434
/** Set whether or not this player should always burn text subtitles into the image,
 *  regardless of the content settings.
 *  @param burn true to always burn subtitles, false to obey content settings.
 */
void
Player::set_always_burn_subtitles (bool burn)
{
	_always_burn_subtitles = burn;
}
444
/** Sets up the player to be faster, possibly at the expense of quality.
 *  Invalidates the pieces so that decoders are re-created in fast mode.
 */
void
Player::set_fast ()
{
	_fast = true;
	_have_valid_pieces = false;
}
452
/** Set the player to decode and play content that would otherwise be
 *  referenced directly from an existing DCP.  Invalidates the pieces so
 *  that DCP decoders pick up the new setting.
 */
void
Player::set_play_referenced ()
{
	_play_referenced = true;
	_have_valid_pieces = false;
}
459
460 list<ReferencedReelAsset>
461 Player::get_reel_assets ()
462 {
463         list<ReferencedReelAsset> a;
464
465         BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
466                 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
467                 if (!j) {
468                         continue;
469                 }
470
471                 scoped_ptr<DCPDecoder> decoder;
472                 try {
473                         decoder.reset (new DCPDecoder (j, _film->log(), false));
474                 } catch (...) {
475                         return a;
476                 }
477
478                 int64_t offset = 0;
479                 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
480
481                         DCPOMATIC_ASSERT (j->video_frame_rate ());
482                         double const cfr = j->video_frame_rate().get();
483                         Frame const trim_start = j->trim_start().frames_round (cfr);
484                         Frame const trim_end = j->trim_end().frames_round (cfr);
485                         int const ffr = _film->video_frame_rate ();
486
487                         DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
488                         if (j->reference_video ()) {
489                                 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
490                                 DCPOMATIC_ASSERT (ra);
491                                 ra->set_entry_point (ra->entry_point() + trim_start);
492                                 ra->set_duration (ra->duration() - trim_start - trim_end);
493                                 a.push_back (
494                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
495                                         );
496                         }
497
498                         if (j->reference_audio ()) {
499                                 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
500                                 DCPOMATIC_ASSERT (ra);
501                                 ra->set_entry_point (ra->entry_point() + trim_start);
502                                 ra->set_duration (ra->duration() - trim_start - trim_end);
503                                 a.push_back (
504                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
505                                         );
506                         }
507
508                         if (j->reference_subtitle ()) {
509                                 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
510                                 DCPOMATIC_ASSERT (ra);
511                                 ra->set_entry_point (ra->entry_point() + trim_start);
512                                 ra->set_duration (ra->duration() - trim_start - trim_end);
513                                 a.push_back (
514                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
515                                         );
516                         }
517
518                         /* Assume that main picture duration is the length of the reel */
519                         offset += k->main_picture()->duration ();
520                 }
521         }
522
523         return a;
524 }
525
/** Make one unit of progress: ask whichever decoder or filler is farthest
 *  behind to emit some data, then push out any audio which is now complete.
 *  @return true if the whole playlist has been played, false otherwise.
 */
bool
Player::pass ()
{
	if (!_have_valid_pieces) {
		setup_pieces ();
	}

	if (_playlist->length() == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		/* Where this piece's decoder is, in DCP time (never before its trim-in point) */
		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end()) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with a subtitle so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->subtitle)) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What we will emit next: content data, filler, or nothing at all */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Black/silent filler takes over if it starts before the earliest content */
	if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
		earliest_content->done = earliest_content->decoder->pass ();
		break;
	case BLACK:
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   this silence.  Bodge the start time of the silence to fix it.  I'm
			   not sure if this is the right solution --- maybe the last thing should
			   be padded `forward' rather than this thing padding `back'.
			*/
			period.from = min(period.from, *_last_audio_time);
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _film->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Everything has been played: flush any 3D frames held by the shuffler
		   and any video we were holding back in _delay.
		*/
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
659
660 optional<PositionImage>
661 Player::subtitles_for_frame (DCPTime time) const
662 {
663         list<PositionImage> subtitles;
664
665         int const vfr = _film->video_frame_rate();
666
667         BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_subtitles)) {
668
669                 /* Image subtitles */
670                 list<PositionImage> c = transform_image_subtitles (i.image);
671                 copy (c.begin(), c.end(), back_inserter (subtitles));
672
673                 /* Text subtitles (rendered to an image) */
674                 if (!i.text.empty ()) {
675                         list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time, vfr);
676                         copy (s.begin(), s.end(), back_inserter (subtitles));
677                 }
678         }
679
680         if (subtitles.empty ()) {
681                 return optional<PositionImage> ();
682         }
683
684         return merge (subtitles);
685 }
686
/** Handle some video data arriving from a decoder (possibly via the Shuffler).
 *  Fills any gap since the last emitted video, then emits this frame
 *  (repeated if the frame rate change requires it).
 *  @param wp Piece that the data comes from.
 *  @param video The video data.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone (e.g. pieces were rebuilt); drop the data */
		return;
	}

	FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
	if (frc.skip && (video.frame % 2) == 1) {
		/* The rate conversion drops every other frame; ignore odd ones */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end());

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());
		/* Last frame we emitted for this piece, if any; repeated to fill rather than using black */
		LastVideoMap::const_iterator last = _last_video.find (wp);
		if (_film->three_d()) {
			/* In 3D, fill eye-by-eye, starting from whichever eye is due next */
			DCPTime j = fill_from;
			Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
			if (eyes == EYES_BOTH) {
				eyes = EYES_LEFT;
			}
			while (j < fill_to || eyes != video.eyes) {
				if (last != _last_video.end()) {
					/* Repeat the last frame with the eye adjusted */
					shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
					copy->set_eyes (eyes);
					emit_video (copy, j);
				} else {
					emit_video (black_player_video_frame(eyes), j);
				}
				if (eyes == EYES_RIGHT) {
					/* Both eyes of this frame time done; advance */
					j += one_video_frame();
				}
				eyes = increment_eyes (eyes);
			}
		} else {
			for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
				if (last != _last_video.end()) {
					emit_video (last->second, j);
				} else {
					emit_video (black_player_video_frame(EYES_BOTH), j);
				}
			}
		}
	}

	/* Remember this frame (per piece) so future gaps can be filled by repeating it */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content,
			video.frame
			)
		);

	/* Emit the frame, repeated as many times as the rate conversion requires,
	   but never beyond the end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end()) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
774
/** Handle a block of decoded audio from a piece of content: trim it to the
 *  part that falls within the content's period in the DCP, apply gain,
 *  remap and (optionally) process it, then push it to the audio merger.
 *  @param wp Piece that the audio came from.
 *  @param stream Stream within the piece that this audio belongs to.
 *  @param content_audio Audio data with its frame position within the content.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		/* Block starts before the content: cut off the part before position() */
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end()) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end()) {
		/* Block overlaps the end of the content: keep only the part before end() */
		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy so that we don't modify the decoder's buffers */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	/* Remember where this stream's pushed audio ends so we can tell when all streams are ready */
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
841
842 void
843 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
844 {
845         shared_ptr<Piece> piece = wp.lock ();
846         if (!piece) {
847                 return;
848         }
849
850         /* Apply content's subtitle offsets */
851         subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
852         subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
853
854         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
855         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((piece->content->subtitle->x_scale() - 1) / 2);
856         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((piece->content->subtitle->y_scale() - 1) / 2);
857
858         /* Apply content's subtitle scale */
859         subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
860         subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
861
862         PlayerSubtitles ps;
863         ps.image.push_back (subtitle.sub);
864         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
865
866         _active_subtitles.add_from (wp, ps, from);
867 }
868
/** Handle the start of some text subtitles from a piece of content: apply the
 *  content's offset, scale and outline settings and register the result as
 *  active from its start time.
 *  @param wp Piece that the subtitles came from.
 *  @param subtitle Subtitle strings and their period within the content.
 */
void
Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	PlayerSubtitles ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	/* Subtitles that start after the end of the content are discarded */
	if (from > piece->content->end()) {
		return;
	}

	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		/* Apply the content's subtitle position offsets */
		s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
		s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
		float const xs = piece->content->subtitle->x_scale();
		float const ys = piece->content->subtitle->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   the expression below is equivalent to multiplying by
		   max(xs, ys), e.g. if xs = ys = 0.5 the size is halved.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* Set the start time to the subtitle's position in the DCP (millisecond precision) */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
		ps.add_fonts (piece->content->subtitle->fonts ());
	}

	_active_subtitles.add_from (wp, ps, from);
}
911
912 void
913 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
914 {
915         if (!_active_subtitles.have (wp)) {
916                 return;
917         }
918
919         shared_ptr<Piece> piece = wp.lock ();
920         if (!piece) {
921                 return;
922         }
923
924         DCPTime const dcp_to = content_time_to_dcp (piece, to);
925
926         if (dcp_to > piece->content->end()) {
927                 return;
928         }
929
930         pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
931
932         if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
933                 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
934         }
935 }
936
937 void
938 Player::seek (DCPTime time, bool accurate)
939 {
940         if (!_have_valid_pieces) {
941                 setup_pieces ();
942         }
943
944         if (_shuffler) {
945                 _shuffler->clear ();
946         }
947
948         _delay.clear ();
949
950         if (_audio_processor) {
951                 _audio_processor->flush ();
952         }
953
954         _audio_merger.clear ();
955         _active_subtitles.clear ();
956
957         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
958                 if (time < i->content->position()) {
959                         /* Before; seek to the start of the content */
960                         i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
961                         i->done = false;
962                 } else if (i->content->position() <= time && time < i->content->end()) {
963                         /* During; seek to position */
964                         i->decoder->seek (dcp_to_content_time (i, time), accurate);
965                         i->done = false;
966                 } else {
967                         /* After; this piece is done */
968                         i->done = true;
969                 }
970         }
971
972         if (accurate) {
973                 _last_video_time = time;
974                 _last_video_eyes = EYES_LEFT;
975                 _last_audio_time = time;
976         } else {
977                 _last_video_time = optional<DCPTime>();
978                 _last_video_eyes = optional<Eyes>();
979                 _last_audio_time = optional<DCPTime>();
980         }
981
982         _black.set_position (time);
983         _silent.set_position (time);
984
985         _last_video.clear ();
986 }
987
988 void
989 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
990 {
991         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
992            player before the video that requires them.
993         */
994         _delay.push_back (make_pair (pv, time));
995
996         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
997                 _last_video_time = time + one_video_frame();
998         }
999         _last_video_eyes = increment_eyes (pv->eyes());
1000
1001         if (_delay.size() < 3) {
1002                 return;
1003         }
1004
1005         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1006         _delay.pop_front();
1007         do_emit_video (to_do.first, to_do.second);
1008 }
1009
1010 void
1011 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1012 {
1013         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1014                 _active_subtitles.clear_before (time);
1015         }
1016
1017         optional<PositionImage> subtitles = subtitles_for_frame (time);
1018         if (subtitles) {
1019                 pv->set_subtitle (subtitles.get ());
1020         }
1021
1022         Video (pv, time);
1023 }
1024
1025 void
1026 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1027 {
1028         /* Log if the assert below is about to fail */
1029         if (_last_audio_time && time != *_last_audio_time) {
1030                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1031         }
1032
1033         /* This audio must follow on from the previous */
1034         DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1035         Audio (data, time);
1036         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1037 }
1038
1039 void
1040 Player::fill_audio (DCPTimePeriod period)
1041 {
1042         if (period.from == period.to) {
1043                 return;
1044         }
1045
1046         DCPOMATIC_ASSERT (period.from < period.to);
1047
1048         DCPTime t = period.from;
1049         while (t < period.to) {
1050                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1051                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1052                 if (samples) {
1053                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1054                         silence->make_silent ();
1055                         emit_audio (silence, t);
1056                 }
1057                 t += block;
1058         }
1059 }
1060
/** @return The duration of one video frame at the film's video frame rate */
DCPTime
Player::one_video_frame () const
{
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}
1066
1067 pair<shared_ptr<AudioBuffers>, DCPTime>
1068 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1069 {
1070         DCPTime const discard_time = discard_to - time;
1071         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1072         Frame remaining_frames = audio->frames() - discard_frames;
1073         if (remaining_frames <= 0) {
1074                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1075         }
1076         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1077         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1078         return make_pair(cut, time + discard_time);
1079 }
1080
1081 void
1082 Player::set_dcp_decode_reduction (optional<int> reduction)
1083 {
1084         if (reduction == _dcp_decode_reduction) {
1085                 return;
1086         }
1087
1088         _dcp_decode_reduction = reduction;
1089         _have_valid_pieces = false;
1090         Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);
1091 }
1092
1093 DCPTime
1094 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1095 {
1096         if (_have_valid_pieces) {
1097                 setup_pieces ();
1098         }
1099
1100         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1101                 if (i->content == content) {
1102                         return content_time_to_dcp (i, t);
1103                 }
1104         }
1105
1106         DCPOMATIC_ASSERT (false);
1107         return DCPTime ();
1108 }