Add a 2-frame `delay' on content arriving at the player to give
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_subtitles.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "compose.hpp"
51 #include "shuffler.h"
52 #include "delay.h"
53 #include <dcp/reel.h>
54 #include <dcp/reel_sound_asset.h>
55 #include <dcp/reel_subtitle_asset.h>
56 #include <dcp/reel_picture_asset.h>
57 #include <boost/foreach.hpp>
58 #include <stdint.h>
59 #include <algorithm>
60 #include <iostream>
61
62 #include "i18n.h"
63
64 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
65
66 using std::list;
67 using std::cout;
68 using std::min;
69 using std::max;
70 using std::min;
71 using std::vector;
72 using std::pair;
73 using std::map;
74 using std::make_pair;
75 using std::copy;
76 using boost::shared_ptr;
77 using boost::weak_ptr;
78 using boost::dynamic_pointer_cast;
79 using boost::optional;
80 using boost::scoped_ptr;
81
/** Construct a Player for the given film and playlist.
 *
 *  Connects to change signals on the film and playlist so that cached state
 *  can be invalidated when things change, sets up the initial video container
 *  size and audio processor, then does an accurate seek to the start.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _have_valid_pieces (false)
	, _ignore_video (false)
	, _ignore_subtitle (false)
	, _always_burn_subtitles (false)
	, _fast (false)
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
	, _delay (0)
{
	_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
	_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
	_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor, if it has one */
	film_changed (Film::AUDIO_PROCESSOR);

	/* Accurate seek to the start of the DCP */
	seek (DCPTime (), true);
}
104
105 Player::~Player ()
106 {
107         delete _shuffler;
108         delete _delay;
109 }
110
/** (Re-)build the list of Pieces (content + decoder pairs) from the playlist.
 *
 *  Also recreates the Shuffler and Delay helpers, connects all decoder signals
 *  to this Player, records per-audio-stream state, and resets the emission
 *  state (last video/audio times).  Called lazily (e.g. from pass()) whenever
 *  _have_valid_pieces is false.
 */
void
Player::setup_pieces ()
{
	_pieces.clear ();

	/* Recreate the helpers; these are raw owning pointers, also freed in the
	   destructor. */
	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	delete _delay;
	_delay = new Delay();
	_delay->Video.connect(bind(&Player::video, this, _1, _2));

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

		if (!i->paths_valid ()) {
			/* The content's files are missing; skip it */
			continue;
		}

		shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
		FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());

		if (!decoder) {
			/* Not something that we can decode; e.g. Atmos content */
			continue;
		}

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->subtitle && _ignore_subtitle) {
			decoder->subtitle->set_ignore (true);
		}

		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				/* We need a Delay to give a little wiggle room to ensure that relevent subtitles arrive at the
				   player before the video that requires them.
				*/
				decoder->video->Data.connect (bind (&Delay::video, _delay, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		if (decoder->subtitle) {
			decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
			decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
			decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
		}
	}

	/* Record, for each audio stream, its piece and start position; pass() uses
	   this to decide how much merged audio can safely be emitted. */
	_stream_states.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Periods of the film with no video / no audio, which pass() fills with
	   black / silence respectively. */
	_black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
	_silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));

	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();
	_have_valid_pieces = true;
}
197
/** Handler for a change to a property of some content in the playlist.
 *  @param w Content that changed.
 *  @param property Property that changed.
 *  @param frequent true if this change is likely to happen frequently (passed on to Changed).
 */
void
Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
{
	shared_ptr<Content> c = w.lock ();
	if (!c) {
		/* The content has gone away; nothing to do */
		return;
	}

	if (
		/* These properties invalidate the Pieces, so they must be rebuilt
		   (lazily, on the next pass) as well as notifying listeners. */
		property == ContentProperty::POSITION ||
		property == ContentProperty::LENGTH ||
		property == ContentProperty::TRIM_START ||
		property == ContentProperty::TRIM_END ||
		property == ContentProperty::PATH ||
		property == VideoContentProperty::FRAME_TYPE ||
		property == DCPContentProperty::NEEDS_ASSETS ||
		property == DCPContentProperty::NEEDS_KDM ||
		property == SubtitleContentProperty::COLOUR ||
		property == SubtitleContentProperty::EFFECT ||
		property == SubtitleContentProperty::EFFECT_COLOUR ||
		property == FFmpegContentProperty::SUBTITLE_STREAM ||
		property == FFmpegContentProperty::FILTERS ||
		property == VideoContentProperty::COLOUR_CONVERSION
		) {

		_have_valid_pieces = false;
		Changed (frequent);

	} else if (
		/* These properties change our output but do not require new pieces */
		property == SubtitleContentProperty::LINE_SPACING ||
		property == SubtitleContentProperty::OUTLINE_WIDTH ||
		property == SubtitleContentProperty::Y_SCALE ||
		property == SubtitleContentProperty::FADE_IN ||
		property == SubtitleContentProperty::FADE_OUT ||
		property == ContentProperty::VIDEO_FRAME_RATE ||
		property == SubtitleContentProperty::USE ||
		property == SubtitleContentProperty::X_OFFSET ||
		property == SubtitleContentProperty::Y_OFFSET ||
		property == SubtitleContentProperty::X_SCALE ||
		property == SubtitleContentProperty::FONTS ||
		property == VideoContentProperty::CROP ||
		property == VideoContentProperty::SCALE ||
		property == VideoContentProperty::FADE_IN ||
		property == VideoContentProperty::FADE_OUT
		) {

		Changed (frequent);
	}
}
247
248 void
249 Player::set_video_container_size (dcp::Size s)
250 {
251         if (s == _video_container_size) {
252                 return;
253         }
254
255         _video_container_size = s;
256
257         _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
258         _black_image->make_black ();
259
260         Changed (false);
261 }
262
/** Handler for a wholesale change to the playlist: invalidate our pieces
 *  (rebuilt lazily on the next pass) and tell listeners that our output
 *  would now be different.
 */
void
Player::playlist_changed ()
{
	_have_valid_pieces = false;
	Changed (false);
}
269
270 void
271 Player::film_changed (Film::Property p)
272 {
273         /* Here we should notice Film properties that affect our output, and
274            alert listeners that our output now would be different to how it was
275            last time we were run.
276         */
277
278         if (p == Film::CONTAINER) {
279                 Changed (false);
280         } else if (p == Film::VIDEO_FRAME_RATE) {
281                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
282                    so we need new pieces here.
283                 */
284                 _have_valid_pieces = false;
285                 Changed (false);
286         } else if (p == Film::AUDIO_PROCESSOR) {
287                 if (_film->audio_processor ()) {
288                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
289                 }
290         }
291 }
292
293 list<PositionImage>
294 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
295 {
296         list<PositionImage> all;
297
298         for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
299                 if (!i->image) {
300                         continue;
301                 }
302
303                 /* We will scale the subtitle up to fit _video_container_size */
304                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
305
306                 /* Then we need a corrective translation, consisting of two parts:
307                  *
308                  * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
309                  *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
310                  *
311                  * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
312                  *     (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
313                  *     (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
314                  *
315                  * Combining these two translations gives these expressions.
316                  */
317
318                 all.push_back (
319                         PositionImage (
320                                 i->image->scale (
321                                         scaled_size,
322                                         dcp::YUV_TO_RGB_REC601,
323                                         i->image->pixel_format (),
324                                         true,
325                                         _fast
326                                         ),
327                                 Position<int> (
328                                         lrint (_video_container_size.width * i->rectangle.x),
329                                         lrint (_video_container_size.height * i->rectangle.y)
330                                         )
331                                 )
332                         );
333         }
334
335         return all;
336 }
337
338 shared_ptr<PlayerVideo>
339 Player::black_player_video_frame (Eyes eyes) const
340 {
341         return shared_ptr<PlayerVideo> (
342                 new PlayerVideo (
343                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
344                         Crop (),
345                         optional<double> (),
346                         _video_container_size,
347                         _video_container_size,
348                         eyes,
349                         PART_WHOLE,
350                         PresetColourConversion::all().front().conversion
351                 )
352         );
353 }
354
/** Convert a DCP time to a video frame index within a piece of content.
 *  @param piece Piece of content.
 *  @param t DCP time.
 *  @return Frame index within the content.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	/* Time within the content, clamped to its trimmed length, with the start
	   trim put back and never going negative. */
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
371
372 DCPTime
373 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
374 {
375         /* See comment in dcp_to_content_video */
376         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
377         return d + piece->content->position();
378 }
379
/** Convert a DCP time to an audio frame count (at the film's audio rate)
 *  within a piece of content.
 *  @param piece Piece of content.
 *  @param t DCP time.
 *  @return Audio frame count within the content, at the resampled rate.
 */
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	/* Clamp to the content's trimmed length */
	s = min (piece->content->length_after_trim(), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
}
388
389 DCPTime
390 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
391 {
392         /* See comment in dcp_to_content_video */
393         return DCPTime::from_frames (f, _film->audio_frame_rate())
394                 - DCPTime (piece->content->trim_start(), piece->frc)
395                 + piece->content->position();
396 }
397
398 ContentTime
399 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
400 {
401         DCPTime s = t - piece->content->position ();
402         s = min (piece->content->length_after_trim(), s);
403         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
404 }
405
406 DCPTime
407 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
408 {
409         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
410 }
411
412 list<shared_ptr<Font> >
413 Player::get_subtitle_fonts ()
414 {
415         if (!_have_valid_pieces) {
416                 setup_pieces ();
417         }
418
419         list<shared_ptr<Font> > fonts;
420         BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
421                 if (p->content->subtitle) {
422                         /* XXX: things may go wrong if there are duplicate font IDs
423                            with different font files.
424                         */
425                         list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
426                         copy (f.begin(), f.end(), back_inserter (fonts));
427                 }
428         }
429
430         return fonts;
431 }
432
/** Set this player never to produce any video data.
 *  Note that there is no way to unset this; it takes effect when the pieces
 *  are next set up.
 */
void
Player::set_ignore_video ()
{
	_ignore_video = true;
}
439
/** Set this player never to produce any subtitle data.
 *  Note that there is no way to unset this; it takes effect when the pieces
 *  are next set up.
 */
void
Player::set_ignore_subtitle ()
{
	_ignore_subtitle = true;
}
445
/** Set whether or not this player should always burn text subtitles into the image,
 *  regardless of the content settings.
 *  The flag is passed on when subtitles_for_frame() collects subtitles to burn.
 *  @param burn true to always burn subtitles, false to obey content settings.
 */
void
Player::set_always_burn_subtitles (bool burn)
{
	_always_burn_subtitles = burn;
}
455
/** Sets up the player to be faster, possibly at the expense of quality.
 *  Invalidates the pieces so that decoders are re-created with the fast flag
 *  on the next pass.
 */
void
Player::set_fast ()
{
	_fast = true;
	_have_valid_pieces = false;
}
463
/** Set the player to decode content that DCPs refer to, rather than skipping it.
 *  Invalidates the pieces so that DCP decoders pick up the setting on the next pass.
 */
void
Player::set_play_referenced ()
{
	_play_referenced = true;
	_have_valid_pieces = false;
}
470
/** @return the reel assets which are referenced (rather than re-encoded) by DCP
 *  content in the playlist, each with the DCP time period it occupies.
 *
 *  NOTE(review): if constructing a DCPDecoder for any content throws, this
 *  silently returns the assets collected so far — confirm that callers are
 *  happy with a partial result.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			/* Only DCP content can reference reel assets */
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (j, _film->log(), false));
		} catch (...) {
			return a;
		}

		/* Offset of the current reel from the start of the content, in DCP frames */
		int64_t offset = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			DCPOMATIC_ASSERT (j->video_frame_rate ());
			double const cfr = j->video_frame_rate().get();
			/* Trims expressed in frames at the content's rate */
			Frame const trim_start = j->trim_start().frames_round (cfr);
			Frame const trim_end = j->trim_end().frames_round (cfr);
			int const ffr = _film->video_frame_rate ();

			DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
			if (j->reference_video ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
				DCPOMATIC_ASSERT (ra);
				/* Apply the content's trims to the asset; note that this mutates
				   the asset held by the decoder's reel. */
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_audio ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_subtitle ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			/* Assume that main picture duration is the length of the reel */
			offset += k->main_picture()->duration ();
		}
	}

	return a;
}
536
/** Run the player for one `pass', causing some video, audio and/or subtitle
 *  data to be emitted via the emit_* methods.
 *  @return true if the player has finished (there is nothing left to emit).
 */
bool
Player::pass ()
{
	if (!_have_valid_pieces) {
		setup_pieces ();
	}

	if (_playlist->length() == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		DCPTime const t = content_time_to_dcp (i, i->decoder->position());
		if (t > i->content->end()) {
			/* This piece's decoder has gone past the end of its content */
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with a subtitle so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->subtitle)) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What to emit on this pass */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Black and silent gaps take precedence if they come before the earliest content */
	if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
		earliest_content->done = earliest_content->decoder->pass ();
		break;
	case BLACK:
		/* Emit one black frame and advance over it */
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   this silence.  Bodge the start time of the silence to fix it.  I'm
			   not sure if this is the right solution --- maybe the last thing should
			   be padded `forward' rather than this thing padding `back'.
			*/
			period.from = min(period.from, *_last_audio_time);
		}
		/* Fill at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _film->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Flush anything still waiting in the Shuffler and the Delay */
		_shuffler->flush ();
		_delay->flush ();
	}
	return done;
}
667
668 optional<PositionImage>
669 Player::subtitles_for_frame (DCPTime time) const
670 {
671         list<PositionImage> subtitles;
672
673         int const vfr = _film->video_frame_rate();
674
675         BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_subtitles)) {
676
677                 /* Image subtitles */
678                 list<PositionImage> c = transform_image_subtitles (i.image);
679                 copy (c.begin(), c.end(), back_inserter (subtitles));
680
681                 /* Text subtitles (rendered to an image) */
682                 if (!i.text.empty ()) {
683                         list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time, vfr);
684                         copy (s.begin(), s.end(), back_inserter (subtitles));
685                 }
686         }
687
688         if (subtitles.empty ()) {
689                 return optional<PositionImage> ();
690         }
691
692         return merge (subtitles);
693 }
694
/** Handler for video data arriving from a decoder (via the Shuffler or the Delay).
 *  @param wp Piece that the data comes from.
 *  @param video Video data.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away */
		return;
	}

	FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
	if (frc.skip && (video.frame % 2) == 1) {
		/* The frame rate change requires skipping; drop odd frames */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end());

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());
		/* Last frame we emitted for this piece, if any; used to repeat into the
		   gap rather than going to black */
		LastVideoMap::const_iterator last = _last_video.find (wp);
		if (_film->three_d()) {
			/* In 3D, fill with alternating eyes, starting with the eye after the
			   last one we emitted */
			DCPTime j = fill_from;
			Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
			if (eyes == EYES_BOTH) {
				eyes = EYES_LEFT;
			}
			while (j < fill_to || eyes != video.eyes) {
				if (last != _last_video.end()) {
					shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
					copy->set_eyes (eyes);
					emit_video (copy, j);
				} else {
					emit_video (black_player_video_frame(eyes), j);
				}
				if (eyes == EYES_RIGHT) {
					/* Only advance time once both eyes have been emitted */
					j += one_video_frame();
				}
				eyes = increment_eyes (eyes);
			}
		} else {
			for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
				if (last != _last_video.end()) {
					emit_video (last->second, j);
				} else {
					emit_video (black_player_video_frame(EYES_BOTH), j);
				}
			}
		}
	}

	/* Remember this frame (with crop/fade/scale applied) so that future gaps
	   for this piece can repeat it */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion ()
			)
		);

	/* Emit the frame, repeating it as the frame rate change requires, but not
	   past the end of the content */
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end()) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
780
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	/* Handle a block of audio decoded from a piece of content: trim it to the
	   content's extent on the DCP timeline, apply the content's gain, remap it
	   into the film's channel layout, run any audio processor and push the
	   result into the merger.
	*/
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has been destroyed; nothing to do */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		/* Block starts before the content does; drop the part that falls outside */
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end()) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end()) {
		/* Block overlaps the end of the content; keep only the part that falls inside */
		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain: apply the content's gain setting, if it is non-zero */

	if (content->gain() != 0) {
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap the stream's channels into the film's channel layout */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process through the audio processor, if there is one */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push into the merger and note how far this stream has got */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
847
848 void
849 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
850 {
851         shared_ptr<Piece> piece = wp.lock ();
852         if (!piece) {
853                 return;
854         }
855
856         /* Apply content's subtitle offsets */
857         subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
858         subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
859
860         /* Apply content's subtitle scale */
861         subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
862         subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
863
864         /* Apply a corrective translation to keep the subtitle centred after that scale */
865         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
866         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
867
868         PlayerSubtitles ps;
869         ps.image.push_back (subtitle.sub);
870         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
871
872         _active_subtitles.add_from (wp, ps, from);
873 }
874
void
Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
{
	/* A text subtitle has started in a piece of content; apply the content's
	   offset, scale and outline settings to each string and record the result
	   as active.
	*/
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	PlayerSubtitles ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		/* h/v positions are fractions of the screen, like our offsets */
		s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
		s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
		float const xs = piece->content->subtitle->x_scale();
		float const ys = piece->content->subtitle->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   this is max (xs, ys), so e.g. if xs = ys = 0.5 we scale
		   size by 0.5.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* All strings in this group start at the same DCP time */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
		ps.add_fonts (piece->content->subtitle->fonts ());
	}

	_active_subtitles.add_from (wp, ps, from);
}
913
914 void
915 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
916 {
917         if (!_active_subtitles.have (wp)) {
918                 return;
919         }
920
921         shared_ptr<Piece> piece = wp.lock ();
922         if (!piece) {
923                 return;
924         }
925
926         DCPTime const dcp_to = content_time_to_dcp (piece, to);
927
928         pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
929
930         if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
931                 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
932         }
933 }
934
935 void
936 Player::seek (DCPTime time, bool accurate)
937 {
938         if (!_have_valid_pieces) {
939                 setup_pieces ();
940         }
941
942         if (_shuffler) {
943                 _shuffler->clear ();
944         }
945
946         if (_delay) {
947                 _delay->clear ();
948         }
949
950         if (_audio_processor) {
951                 _audio_processor->flush ();
952         }
953
954         _audio_merger.clear ();
955         _active_subtitles.clear ();
956
957         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
958                 if (time < i->content->position()) {
959                         /* Before; seek to 0 */
960                         i->decoder->seek (ContentTime(), accurate);
961                         i->done = false;
962                 } else if (i->content->position() <= time && time < i->content->end()) {
963                         /* During; seek to position */
964                         i->decoder->seek (dcp_to_content_time (i, time), accurate);
965                         i->done = false;
966                 } else {
967                         /* After; this piece is done */
968                         i->done = true;
969                 }
970         }
971
972         if (accurate) {
973                 _last_video_time = time;
974                 _last_video_eyes = EYES_LEFT;
975                 _last_audio_time = time;
976         } else {
977                 _last_video_time = optional<DCPTime>();
978                 _last_video_eyes = optional<Eyes>();
979                 _last_audio_time = optional<DCPTime>();
980         }
981
982         _black.set_position (time);
983         _silent.set_position (time);
984
985         _last_video.clear ();
986 }
987
988 void
989 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
990 {
991         optional<PositionImage> subtitles = subtitles_for_frame (time);
992         if (subtitles) {
993                 pv->set_subtitle (subtitles.get ());
994         }
995
996         Video (pv, time);
997
998         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
999                 _last_video_time = time + one_video_frame();
1000                 _active_subtitles.clear_before (time);
1001         }
1002         _last_video_eyes = increment_eyes (pv->eyes());
1003 }
1004
1005 void
1006 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1007 {
1008         /* Log if the assert below is about to fail */
1009         if (_last_audio_time && time != *_last_audio_time) {
1010                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1011         }
1012
1013         /* This audio must follow on from the previous */
1014         DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1015         Audio (data, time);
1016         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1017 }
1018
1019 void
1020 Player::fill_audio (DCPTimePeriod period)
1021 {
1022         if (period.from == period.to) {
1023                 return;
1024         }
1025
1026         DCPOMATIC_ASSERT (period.from < period.to);
1027
1028         DCPTime t = period.from;
1029         while (t < period.to) {
1030                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1031                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1032                 if (samples) {
1033                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1034                         silence->make_silent ();
1035                         emit_audio (silence, t);
1036                 }
1037                 t += block;
1038         }
1039 }
1040
DCPTime
Player::one_video_frame () const
{
	/* The DCP-time length of one video frame at the film's frame rate */
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}
1046
1047 pair<shared_ptr<AudioBuffers>, DCPTime>
1048 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1049 {
1050         DCPTime const discard_time = discard_to - time;
1051         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1052         Frame remaining_frames = audio->frames() - discard_frames;
1053         if (remaining_frames <= 0) {
1054                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1055         }
1056         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1057         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1058         return make_pair(cut, time + discard_time);
1059 }
1060
1061 void
1062 Player::set_dcp_decode_reduction (optional<int> reduction)
1063 {
1064         if (reduction == _dcp_decode_reduction) {
1065                 return;
1066         }
1067
1068         _dcp_decode_reduction = reduction;
1069         _have_valid_pieces = false;
1070         Changed (false);
1071 }