/* Speed some operations by re-using the last PlayerVideo but with
   [dcpomatic.git] / src / lib / player.cc */
1 /*
2     Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_subtitles.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "compose.hpp"
51 #include "shuffler.h"
52 #include <dcp/reel.h>
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <boost/foreach.hpp>
57 #include <stdint.h>
58 #include <algorithm>
59 #include <iostream>
60
61 #include "i18n.h"
62
/* Convenience macro: write a composed message to the film's log as a general entry */
#define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
64
65 using std::list;
66 using std::cout;
67 using std::min;
68 using std::max;
69 using std::min;
70 using std::vector;
71 using std::pair;
72 using std::map;
73 using std::make_pair;
74 using std::copy;
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
80
/* Identifiers for Player properties reported via the Changed signal.
   Numbered from 700, presumably to keep them distinct from other property
   ID ranges used elsewhere in the project --- TODO confirm.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
86
/** Construct a Player.
 *  @param film Film that is to be played.
 *  @param playlist Playlist providing the content.
 *
 *  Subscribes to change signals from the film and playlist so that our
 *  output can be invalidated when they change, sets the video container
 *  size from the film, then seeks to time zero.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _have_valid_pieces (false)
	, _ignore_video (false)
	, _ignore_subtitle (false)
	, _always_burn_subtitles (false)
	, _fast (false)
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
	_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
	_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
	set_video_container_size (_film->frame_size ());

	/* Create the audio processor (if the film has one) before first use */
	film_changed (Film::AUDIO_PROCESSOR);

	/* Accurate seek to the start to put the player into a known state */
	seek (DCPTime (), true);
}
108
Player::~Player ()
{
	/* _shuffler is a raw pointer owned by this Player (it is recreated in
	   setup_pieces()), so it must be freed here.
	*/
	delete _shuffler;
}
113
/** Rebuild _pieces from the playlist: one Piece per playable content item,
 *  each with a decoder whose output signals are wired into this Player.
 *  Also resets the black/silence fillers and the emission clocks.
 */
void
Player::setup_pieces ()
{
	_pieces.clear ();

	/* The Shuffler re-orders 3D left/right-eye frames; recreate it so that
	   no stale state survives from a previous set of pieces.
	*/
	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

		if (!i->paths_valid ()) {
			/* Content whose files are missing cannot be played */
			continue;
		}

		shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
		FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());

		if (!decoder) {
			/* Not something that we can decode; e.g. Atmos content */
			continue;
		}

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->subtitle && _ignore_subtitle) {
			decoder->subtitle->set_ignore (true);
		}

		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		if (decoder->subtitle) {
			decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
			decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
			decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
		}
	}

	/* Record, for each audio stream, the piece it belongs to and how far
	   its audio has been pushed (initially the content's position).
	*/
	_stream_states.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Trackers for the timeline gaps that must be filled with black video
	   and silent audio respectively.
	*/
	_black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
	_silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));

	/* Reset the emission clocks */
	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();
	_have_valid_pieces = true;
}
193
/** Handler for a change to a property of some content in the playlist.
 *  @param w Content that changed.
 *  @param property Identifier of the property that changed.
 *  @param frequent true if this change is one of a rapid sequence which is
 *  expected to be frequent (passed on to our listeners).
 *
 *  Properties in the first group invalidate our pieces (the decoders must
 *  be rebuilt); those in the second group only alter how already-decoded
 *  data is presented, so we just notify listeners without rebuilding.
 */
void
Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
{
	shared_ptr<Content> c = w.lock ();
	if (!c) {
		return;
	}

	if (
		property == ContentProperty::POSITION ||
		property == ContentProperty::LENGTH ||
		property == ContentProperty::TRIM_START ||
		property == ContentProperty::TRIM_END ||
		property == ContentProperty::PATH ||
		property == VideoContentProperty::FRAME_TYPE ||
		property == DCPContentProperty::NEEDS_ASSETS ||
		property == DCPContentProperty::NEEDS_KDM ||
		property == SubtitleContentProperty::COLOUR ||
		property == SubtitleContentProperty::EFFECT ||
		property == SubtitleContentProperty::EFFECT_COLOUR ||
		property == FFmpegContentProperty::SUBTITLE_STREAM ||
		property == FFmpegContentProperty::FILTERS ||
		property == VideoContentProperty::COLOUR_CONVERSION
		) {

		/* These require the pieces to be rebuilt */
		_have_valid_pieces = false;
		Changed (property, frequent);

	} else if (
		property == SubtitleContentProperty::LINE_SPACING ||
		property == SubtitleContentProperty::OUTLINE_WIDTH ||
		property == SubtitleContentProperty::Y_SCALE ||
		property == SubtitleContentProperty::FADE_IN ||
		property == SubtitleContentProperty::FADE_OUT ||
		property == ContentProperty::VIDEO_FRAME_RATE ||
		property == SubtitleContentProperty::USE ||
		property == SubtitleContentProperty::X_OFFSET ||
		property == SubtitleContentProperty::Y_OFFSET ||
		property == SubtitleContentProperty::X_SCALE ||
		property == SubtitleContentProperty::FONTS ||
		property == VideoContentProperty::CROP ||
		property == VideoContentProperty::SCALE ||
		property == VideoContentProperty::FADE_IN ||
		property == VideoContentProperty::FADE_OUT
		) {

		/* Presentation-only changes: just tell listeners */
		Changed (property, frequent);
	}
}
243
244 void
245 Player::set_video_container_size (dcp::Size s)
246 {
247         if (s == _video_container_size) {
248                 return;
249         }
250
251         _video_container_size = s;
252
253         _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
254         _black_image->make_black ();
255
256         Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
257 }
258
/** Handler for a change to the playlist itself: the pieces must be rebuilt
 *  before the next pass/seek, and listeners are told that our output may
 *  now be different.
 */
void
Player::playlist_changed ()
{
	_have_valid_pieces = false;
	Changed (PlayerProperty::PLAYLIST, false);
}
265
266 void
267 Player::film_changed (Film::Property p)
268 {
269         /* Here we should notice Film properties that affect our output, and
270            alert listeners that our output now would be different to how it was
271            last time we were run.
272         */
273
274         if (p == Film::CONTAINER) {
275                 Changed (PlayerProperty::FILM_CONTAINER, false);
276         } else if (p == Film::VIDEO_FRAME_RATE) {
277                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
278                    so we need new pieces here.
279                 */
280                 _have_valid_pieces = false;
281                 Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
282         } else if (p == Film::AUDIO_PROCESSOR) {
283                 if (_film->audio_processor ()) {
284                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
285                 }
286         }
287 }
288
289 list<PositionImage>
290 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
291 {
292         list<PositionImage> all;
293
294         for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
295                 if (!i->image) {
296                         continue;
297                 }
298
299                 /* We will scale the subtitle up to fit _video_container_size */
300                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
301
302                 /* Then we need a corrective translation, consisting of two parts:
303                  *
304                  * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
305                  *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
306                  *
307                  * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
308                  *     (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
309                  *     (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
310                  *
311                  * Combining these two translations gives these expressions.
312                  */
313
314                 all.push_back (
315                         PositionImage (
316                                 i->image->scale (
317                                         scaled_size,
318                                         dcp::YUV_TO_RGB_REC601,
319                                         i->image->pixel_format (),
320                                         true,
321                                         _fast
322                                         ),
323                                 Position<int> (
324                                         lrint (_video_container_size.width * i->rectangle.x),
325                                         lrint (_video_container_size.height * i->rectangle.y)
326                                         )
327                                 )
328                         );
329         }
330
331         return all;
332 }
333
334 shared_ptr<PlayerVideo>
335 Player::black_player_video_frame (Eyes eyes) const
336 {
337         return shared_ptr<PlayerVideo> (
338                 new PlayerVideo (
339                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
340                         Crop (),
341                         optional<double> (),
342                         _video_container_size,
343                         _video_container_size,
344                         eyes,
345                         PART_WHOLE,
346                         PresetColourConversion::all().front().conversion,
347                         boost::weak_ptr<Content>(),
348                         boost::optional<Frame>()
349                 )
350         );
351 }
352
353 Frame
354 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
355 {
356         DCPTime s = t - piece->content->position ();
357         s = min (piece->content->length_after_trim(), s);
358         s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
359
360         /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
361            then convert that ContentTime to frames at the content's rate.  However this fails for
362            situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
363            enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
364
365            Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
366         */
367         return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
368 }
369
370 DCPTime
371 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
372 {
373         /* See comment in dcp_to_content_video */
374         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
375         return d + piece->content->position();
376 }
377
378 Frame
379 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
380 {
381         DCPTime s = t - piece->content->position ();
382         s = min (piece->content->length_after_trim(), s);
383         /* See notes in dcp_to_content_video */
384         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
385 }
386
387 DCPTime
388 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
389 {
390         /* See comment in dcp_to_content_video */
391         return DCPTime::from_frames (f, _film->audio_frame_rate())
392                 - DCPTime (piece->content->trim_start(), piece->frc)
393                 + piece->content->position();
394 }
395
396 ContentTime
397 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
398 {
399         DCPTime s = t - piece->content->position ();
400         s = min (piece->content->length_after_trim(), s);
401         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
402 }
403
404 DCPTime
405 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
406 {
407         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
408 }
409
410 list<shared_ptr<Font> >
411 Player::get_subtitle_fonts ()
412 {
413         if (!_have_valid_pieces) {
414                 setup_pieces ();
415         }
416
417         list<shared_ptr<Font> > fonts;
418         BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
419                 if (p->content->subtitle) {
420                         /* XXX: things may go wrong if there are duplicate font IDs
421                            with different font files.
422                         */
423                         list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
424                         copy (f.begin(), f.end(), back_inserter (fonts));
425                 }
426         }
427
428         return fonts;
429 }
430
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
	/* Applied to decoders when pieces are (re-)created in setup_pieces() */
	_ignore_video = true;
}
437
/** Set this player never to produce any subtitle data */
void
Player::set_ignore_subtitle ()
{
	/* Applied to decoders when pieces are (re-)created in setup_pieces() */
	_ignore_subtitle = true;
}
443
/** Set whether or not this player should always burn text subtitles into the image,
 *  regardless of the content settings.
 *  @param burn true to always burn subtitles, false to obey content settings.
 */
void
Player::set_always_burn_subtitles (bool burn)
{
	/* Consulted by subtitles_for_frame(); no rebuild of pieces is needed */
	_always_burn_subtitles = burn;
}
453
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	_fast = true;
	/* Decoders are given the `fast' flag at creation, so they must be rebuilt */
	_have_valid_pieces = false;
}
461
/** Set the player to decode and play content which is marked as referenced,
 *  rather than skipping it.
 */
void
Player::set_play_referenced ()
{
	_play_referenced = true;
	/* The flag is passed to DCP decoders at creation, so rebuild pieces */
	_have_valid_pieces = false;
}
468
469 list<ReferencedReelAsset>
470 Player::get_reel_assets ()
471 {
472         list<ReferencedReelAsset> a;
473
474         BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
475                 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
476                 if (!j) {
477                         continue;
478                 }
479
480                 scoped_ptr<DCPDecoder> decoder;
481                 try {
482                         decoder.reset (new DCPDecoder (j, _film->log(), false));
483                 } catch (...) {
484                         return a;
485                 }
486
487                 int64_t offset = 0;
488                 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
489
490                         DCPOMATIC_ASSERT (j->video_frame_rate ());
491                         double const cfr = j->video_frame_rate().get();
492                         Frame const trim_start = j->trim_start().frames_round (cfr);
493                         Frame const trim_end = j->trim_end().frames_round (cfr);
494                         int const ffr = _film->video_frame_rate ();
495
496                         DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
497                         if (j->reference_video ()) {
498                                 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
499                                 DCPOMATIC_ASSERT (ra);
500                                 ra->set_entry_point (ra->entry_point() + trim_start);
501                                 ra->set_duration (ra->duration() - trim_start - trim_end);
502                                 a.push_back (
503                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
504                                         );
505                         }
506
507                         if (j->reference_audio ()) {
508                                 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
509                                 DCPOMATIC_ASSERT (ra);
510                                 ra->set_entry_point (ra->entry_point() + trim_start);
511                                 ra->set_duration (ra->duration() - trim_start - trim_end);
512                                 a.push_back (
513                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
514                                         );
515                         }
516
517                         if (j->reference_subtitle ()) {
518                                 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
519                                 DCPOMATIC_ASSERT (ra);
520                                 ra->set_entry_point (ra->entry_point() + trim_start);
521                                 ra->set_duration (ra->duration() - trim_start - trim_end);
522                                 a.push_back (
523                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
524                                         );
525                         }
526
527                         /* Assume that main picture duration is the length of the reel */
528                         offset += k->main_picture()->duration ();
529                 }
530         }
531
532         return a;
533 }
534
/** Run the player for one step: find the decoder (or black/silence filler)
 *  which is farthest behind and make it emit some data, then emit any audio
 *  which is now complete.
 *  @return true if the player has finished; false if there is more to do.
 */
bool
Player::pass ()
{
	if (!_have_valid_pieces) {
		setup_pieces ();
	}

	if (_playlist->length() == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		DCPTime const t = content_time_to_dcp (i, i->decoder->position());
		if (t > i->content->end()) {
			/* This piece's decoder has gone past its content's period */
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with a subtitle so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->subtitle)) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What we will emit on this pass */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Black or silent filler takes precedence if it is behind the earliest content */
	if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
		earliest_content->done = earliest_content->decoder->pass ();
		break;
	case BLACK:
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   this silence.  Bodge the start time of the silence to fix it.  I'm
			   not sure if this is the right solution --- maybe the last thing should
			   be padded `forward' rather than this thing padding `back'.
			*/
			period.from = min(period.from, *_last_audio_time);
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _film->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Flush anything still buffered in the shuffler and the video delay line */
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
668
669 optional<PositionImage>
670 Player::subtitles_for_frame (DCPTime time) const
671 {
672         list<PositionImage> subtitles;
673
674         int const vfr = _film->video_frame_rate();
675
676         BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_subtitles)) {
677
678                 /* Image subtitles */
679                 list<PositionImage> c = transform_image_subtitles (i.image);
680                 copy (c.begin(), c.end(), back_inserter (subtitles));
681
682                 /* Text subtitles (rendered to an image) */
683                 if (!i.text.empty ()) {
684                         list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time, vfr);
685                         copy (s.begin(), s.end(), back_inserter (subtitles));
686                 }
687         }
688
689         if (subtitles.empty ()) {
690                 return optional<PositionImage> ();
691         }
692
693         return merge (subtitles);
694 }
695
/** Handler for video data emitted by a piece's decoder.
 *  @param wp Piece that the data comes from.
 *  @param video The video data.
 *
 *  Fills any gap between the last emitted video and this frame (with a
 *  repeat of the previous frame where we have one, otherwise black), then
 *  emits this frame, repeated as the frame-rate change requires.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
	if (frc.skip && (video.frame % 2) == 1) {
		/* Content rate is roughly double the DCP rate: drop every other frame */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end());

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());
		/* Last PlayerVideo we made for this piece, if any; we prefer to
		   repeat that rather than filling with black.
		*/
		LastVideoMap::const_iterator last = _last_video.find (wp);
		if (_film->three_d()) {
			/* In 3D we must fill whole eye pairs, continuing from the
			   last eye that was emitted.
			*/
			DCPTime j = fill_from;
			Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
			if (eyes == EYES_BOTH) {
				eyes = EYES_LEFT;
			}
			while (j < fill_to || eyes != video.eyes) {
				if (last != _last_video.end()) {
					shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
					copy->set_eyes (eyes);
					emit_video (copy, j);
				} else {
					emit_video (black_player_video_frame(eyes), j);
				}
				if (eyes == EYES_RIGHT) {
					/* A left/right pair is complete; move on one frame */
					j += one_video_frame();
				}
				eyes = increment_eyes (eyes);
			}
		} else {
			for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
				if (last != _last_video.end()) {
					emit_video (last->second, j);
				} else {
					emit_video (black_player_video_frame(EYES_BOTH), j);
				}
			}
		}
	}

	/* Remember this frame so that it can be repeated to fill future gaps */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content,
			video.frame
			)
		);

	/* Emit the frame, repeated if the frame-rate change requires it, but
	   never past the end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end()) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
783
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	/* Handle a block of audio arriving from the decoder of the piece `wp':
	   trim it to the content's extent, apply the content's gain, remap it to
	   the film's channel layout, run it through any audio processor and push
	   it into the merger, recording where this stream's audio now ends.
	*/
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; drop this audio */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		/* Block starts before the content: cut off the part before position() */
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end()) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end()) {
		/* Block straddles the end of the content: keep only the part before end() */
		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy before applying gain; presumably to avoid mutating the decoder's
		   own buffers -- TODO confirm the decoder re-uses them.
		*/
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap to the film's channel count using the stream's mapping */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	/* Note the DCP time up to which this stream has now supplied audio */
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
850
851 void
852 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
853 {
854         shared_ptr<Piece> piece = wp.lock ();
855         if (!piece) {
856                 return;
857         }
858
859         /* Apply content's subtitle offsets */
860         subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
861         subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
862
863         /* Apply content's subtitle scale */
864         subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
865         subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
866
867         /* Apply a corrective translation to keep the subtitle centred after that scale */
868         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
869         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
870
871         PlayerSubtitles ps;
872         ps.image.push_back (subtitle.sub);
873         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
874
875         _active_subtitles.add_from (wp, ps, from);
876 }
877
878 void
879 Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
880 {
881         shared_ptr<Piece> piece = wp.lock ();
882         if (!piece) {
883                 return;
884         }
885
886         PlayerSubtitles ps;
887         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
888
889         BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
890                 s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
891                 s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
892                 float const xs = piece->content->subtitle->x_scale();
893                 float const ys = piece->content->subtitle->y_scale();
894                 float size = s.size();
895
896                 /* Adjust size to express the common part of the scaling;
897                    e.g. if xs = ys = 0.5 we scale size by 2.
898                 */
899                 if (xs > 1e-5 && ys > 1e-5) {
900                         size *= 1 / min (1 / xs, 1 / ys);
901                 }
902                 s.set_size (size);
903
904                 /* Then express aspect ratio changes */
905                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
906                         s.set_aspect_adjust (xs / ys);
907                 }
908
909                 s.set_in (dcp::Time(from.seconds(), 1000));
910                 ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
911                 ps.add_fonts (piece->content->subtitle->fonts ());
912         }
913
914         _active_subtitles.add_from (wp, ps, from);
915 }
916
917 void
918 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
919 {
920         if (!_active_subtitles.have (wp)) {
921                 return;
922         }
923
924         shared_ptr<Piece> piece = wp.lock ();
925         if (!piece) {
926                 return;
927         }
928
929         DCPTime const dcp_to = content_time_to_dcp (piece, to);
930
931         pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
932
933         if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
934                 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
935         }
936 }
937
938 void
939 Player::seek (DCPTime time, bool accurate)
940 {
941         if (!_have_valid_pieces) {
942                 setup_pieces ();
943         }
944
945         if (_shuffler) {
946                 _shuffler->clear ();
947         }
948
949         _delay.clear ();
950
951         if (_audio_processor) {
952                 _audio_processor->flush ();
953         }
954
955         _audio_merger.clear ();
956         _active_subtitles.clear ();
957
958         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
959                 if (time < i->content->position()) {
960                         /* Before; seek to 0 */
961                         i->decoder->seek (ContentTime(), accurate);
962                         i->done = false;
963                 } else if (i->content->position() <= time && time < i->content->end()) {
964                         /* During; seek to position */
965                         i->decoder->seek (dcp_to_content_time (i, time), accurate);
966                         i->done = false;
967                 } else {
968                         /* After; this piece is done */
969                         i->done = true;
970                 }
971         }
972
973         if (accurate) {
974                 _last_video_time = time;
975                 _last_video_eyes = EYES_LEFT;
976                 _last_audio_time = time;
977         } else {
978                 _last_video_time = optional<DCPTime>();
979                 _last_video_eyes = optional<Eyes>();
980                 _last_audio_time = optional<DCPTime>();
981         }
982
983         _black.set_position (time);
984         _silent.set_position (time);
985
986         _last_video.clear ();
987 }
988
989 void
990 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
991 {
992         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
993            player before the video that requires them.
994         */
995         _delay.push_back (make_pair (pv, time));
996
997         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
998                 _last_video_time = time + one_video_frame();
999         }
1000         _last_video_eyes = increment_eyes (pv->eyes());
1001
1002         if (_delay.size() < 3) {
1003                 return;
1004         }
1005
1006         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1007         _delay.pop_front();
1008         do_emit_video (to_do.first, to_do.second);
1009 }
1010
1011 void
1012 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1013 {
1014         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1015                 _active_subtitles.clear_before (time);
1016         }
1017
1018         optional<PositionImage> subtitles = subtitles_for_frame (time);
1019         if (subtitles) {
1020                 pv->set_subtitle (subtitles.get ());
1021         }
1022
1023         Video (pv, time);
1024 }
1025
1026 void
1027 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1028 {
1029         /* Log if the assert below is about to fail */
1030         if (_last_audio_time && time != *_last_audio_time) {
1031                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1032         }
1033
1034         /* This audio must follow on from the previous */
1035         DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1036         Audio (data, time);
1037         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1038 }
1039
1040 void
1041 Player::fill_audio (DCPTimePeriod period)
1042 {
1043         if (period.from == period.to) {
1044                 return;
1045         }
1046
1047         DCPOMATIC_ASSERT (period.from < period.to);
1048
1049         DCPTime t = period.from;
1050         while (t < period.to) {
1051                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1052                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1053                 if (samples) {
1054                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1055                         silence->make_silent ();
1056                         emit_audio (silence, t);
1057                 }
1058                 t += block;
1059         }
1060 }
1061
1062 DCPTime
1063 Player::one_video_frame () const
1064 {
1065         return DCPTime::from_frames (1, _film->video_frame_rate ());
1066 }
1067
1068 pair<shared_ptr<AudioBuffers>, DCPTime>
1069 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1070 {
1071         DCPTime const discard_time = discard_to - time;
1072         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1073         Frame remaining_frames = audio->frames() - discard_frames;
1074         if (remaining_frames <= 0) {
1075                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1076         }
1077         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1078         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1079         return make_pair(cut, time + discard_time);
1080 }
1081
1082 void
1083 Player::set_dcp_decode_reduction (optional<int> reduction)
1084 {
1085         if (reduction == _dcp_decode_reduction) {
1086                 return;
1087         }
1088
1089         _dcp_decode_reduction = reduction;
1090         _have_valid_pieces = false;
1091         Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);
1092 }