Prior to 2537a2d Decoder::position() was not updated if a decoder emitted
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_subtitles.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "compose.hpp"
51 #include "shuffler.h"
52 #include <dcp/reel.h>
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <boost/foreach.hpp>
57 #include <stdint.h>
58 #include <algorithm>
59 #include <iostream>
60
61 #include "i18n.h"
62
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
64
65 using std::list;
66 using std::cout;
67 using std::min;
68 using std::max;
69 using std::min;
70 using std::vector;
71 using std::pair;
72 using std::map;
73 using std::make_pair;
74 using std::copy;
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
80
/** Construct a Player for a given film/playlist.
 *  Subscribes to change signals from the film and playlist so that the
 *  internal "pieces" can be rebuilt lazily when anything relevant changes,
 *  then performs an accurate seek to time 0 so we start in a known state.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _have_valid_pieces (false)
	, _ignore_video (false)
	, _ignore_subtitle (false)
	, _always_burn_subtitles (false)
	, _fast (false)
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	/* Listen for changes which may invalidate our pieces or our output */
	_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
	_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
	_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor (if any) via the normal change path */
	film_changed (Film::AUDIO_PROCESSOR);

	/* Accurate seek to the start so decoder positions are well-defined */
	seek (DCPTime (), true);
}
102
Player::~Player ()
{
	/* _shuffler is a raw pointer owned by this Player (allocated in setup_pieces) */
	delete _shuffler;
}
107
108 void
109 Player::setup_pieces ()
110 {
111         _pieces.clear ();
112
113         delete _shuffler;
114         _shuffler = new Shuffler();
115         _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
116
117         BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
118
119                 if (!i->paths_valid ()) {
120                         continue;
121                 }
122
123                 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
124                 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
125
126                 if (!decoder) {
127                         /* Not something that we can decode; e.g. Atmos content */
128                         continue;
129                 }
130
131                 if (decoder->video && _ignore_video) {
132                         decoder->video->set_ignore ();
133                 }
134
135                 if (decoder->subtitle && _ignore_subtitle) {
136                         decoder->subtitle->set_ignore ();
137                 }
138
139                 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
140                 if (dcp && _play_referenced) {
141                         if (_play_referenced) {
142                                 dcp->set_decode_referenced ();
143                         }
144                         dcp->set_forced_reduction (_dcp_decode_reduction);
145                 }
146
147                 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
148                 _pieces.push_back (piece);
149
150                 if (decoder->video) {
151                         if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
152                                 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
153                         } else {
154                                 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
155                         }
156                 }
157
158                 if (decoder->audio) {
159                         decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
160                 }
161
162                 if (decoder->subtitle) {
163                         decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
164                         decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
165                         decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
166                 }
167         }
168
169         _stream_states.clear ();
170         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
171                 if (i->content->audio) {
172                         BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
173                                 _stream_states[j] = StreamState (i, i->content->position ());
174                         }
175                 }
176         }
177
178         _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
179         _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));
180
181         _last_video_time = DCPTime ();
182         _last_video_eyes = EYES_BOTH;
183         _last_audio_time = DCPTime ();
184         _have_valid_pieces = true;
185 }
186
187 void
188 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
189 {
190         shared_ptr<Content> c = w.lock ();
191         if (!c) {
192                 return;
193         }
194
195         if (
196                 property == ContentProperty::POSITION ||
197                 property == ContentProperty::LENGTH ||
198                 property == ContentProperty::TRIM_START ||
199                 property == ContentProperty::TRIM_END ||
200                 property == ContentProperty::PATH ||
201                 property == VideoContentProperty::FRAME_TYPE ||
202                 property == DCPContentProperty::NEEDS_ASSETS ||
203                 property == DCPContentProperty::NEEDS_KDM ||
204                 property == SubtitleContentProperty::COLOUR ||
205                 property == SubtitleContentProperty::EFFECT ||
206                 property == SubtitleContentProperty::EFFECT_COLOUR ||
207                 property == FFmpegContentProperty::SUBTITLE_STREAM ||
208                 property == FFmpegContentProperty::FILTERS ||
209                 property == VideoContentProperty::COLOUR_CONVERSION
210                 ) {
211
212                 _have_valid_pieces = false;
213                 Changed (frequent);
214
215         } else if (
216                 property == SubtitleContentProperty::LINE_SPACING ||
217                 property == SubtitleContentProperty::OUTLINE_WIDTH ||
218                 property == SubtitleContentProperty::Y_SCALE ||
219                 property == SubtitleContentProperty::FADE_IN ||
220                 property == SubtitleContentProperty::FADE_OUT ||
221                 property == ContentProperty::VIDEO_FRAME_RATE ||
222                 property == SubtitleContentProperty::USE ||
223                 property == SubtitleContentProperty::X_OFFSET ||
224                 property == SubtitleContentProperty::Y_OFFSET ||
225                 property == SubtitleContentProperty::X_SCALE ||
226                 property == SubtitleContentProperty::FONTS ||
227                 property == VideoContentProperty::CROP ||
228                 property == VideoContentProperty::SCALE ||
229                 property == VideoContentProperty::FADE_IN ||
230                 property == VideoContentProperty::FADE_OUT
231                 ) {
232
233                 Changed (frequent);
234         }
235 }
236
237 void
238 Player::set_video_container_size (dcp::Size s)
239 {
240         if (s == _video_container_size) {
241                 return;
242         }
243
244         _video_container_size = s;
245
246         _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
247         _black_image->make_black ();
248
249         Changed (false);
250 }
251
/** Handle a wholesale change to the playlist: our pieces no longer match it,
 *  so mark them invalid (they are rebuilt lazily) and tell listeners.
 */
void
Player::playlist_changed ()
{
	_have_valid_pieces = false;
	Changed (false);
}
258
void
Player::film_changed (Film::Property p)
{
	/* Here we should notice Film properties that affect our output, and
	   alert listeners that our output now would be different to how it was
	   last time we were run.
	*/

	if (p == Film::CONTAINER) {
		Changed (false);
	} else if (p == Film::VIDEO_FRAME_RATE) {
		/* Pieces contain a FrameRateChange which contains the DCP frame rate,
		   so we need new pieces here.
		*/
		_have_valid_pieces = false;
		Changed (false);
	} else if (p == Film::AUDIO_PROCESSOR) {
		if (_film->audio_processor ()) {
			/* Clone the processor at the film's audio rate for our own use */
			_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
		}
		/* NOTE(review): if the audio processor has been removed from the film,
		   _audio_processor is left holding the previous clone — confirm whether
		   it should be reset here.
		*/
	}
}
281
282 list<PositionImage>
283 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
284 {
285         list<PositionImage> all;
286
287         for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
288                 if (!i->image) {
289                         continue;
290                 }
291
292                 /* We will scale the subtitle up to fit _video_container_size */
293                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
294
295                 /* Then we need a corrective translation, consisting of two parts:
296                  *
297                  * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
298                  *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
299                  *
300                  * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
301                  *     (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
302                  *     (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
303                  *
304                  * Combining these two translations gives these expressions.
305                  */
306
307                 all.push_back (
308                         PositionImage (
309                                 i->image->scale (
310                                         scaled_size,
311                                         dcp::YUV_TO_RGB_REC601,
312                                         i->image->pixel_format (),
313                                         true,
314                                         _fast
315                                         ),
316                                 Position<int> (
317                                         lrint (_video_container_size.width * i->rectangle.x),
318                                         lrint (_video_container_size.height * i->rectangle.y)
319                                         )
320                                 )
321                         );
322         }
323
324         return all;
325 }
326
327 shared_ptr<PlayerVideo>
328 Player::black_player_video_frame (Eyes eyes) const
329 {
330         return shared_ptr<PlayerVideo> (
331                 new PlayerVideo (
332                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
333                         Crop (),
334                         optional<double> (),
335                         _video_container_size,
336                         _video_container_size,
337                         eyes,
338                         PART_WHOLE,
339                         PresetColourConversion::all().front().conversion
340                 )
341         );
342 }
343
344 Frame
345 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
346 {
347         DCPTime s = t - piece->content->position ();
348         s = min (piece->content->length_after_trim(), s);
349         s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
350
351         /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
352            then convert that ContentTime to frames at the content's rate.  However this fails for
353            situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
354            enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
355
356            Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
357         */
358         return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
359 }
360
361 DCPTime
362 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
363 {
364         /* See comment in dcp_to_content_video */
365         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
366         return d + piece->content->position();
367 }
368
369 Frame
370 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
371 {
372         DCPTime s = t - piece->content->position ();
373         s = min (piece->content->length_after_trim(), s);
374         /* See notes in dcp_to_content_video */
375         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
376 }
377
378 DCPTime
379 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
380 {
381         /* See comment in dcp_to_content_video */
382         return DCPTime::from_frames (f, _film->audio_frame_rate())
383                 - DCPTime (piece->content->trim_start(), piece->frc)
384                 + piece->content->position();
385 }
386
387 ContentTime
388 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
389 {
390         DCPTime s = t - piece->content->position ();
391         s = min (piece->content->length_after_trim(), s);
392         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
393 }
394
395 DCPTime
396 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
397 {
398         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
399 }
400
401 list<shared_ptr<Font> >
402 Player::get_subtitle_fonts ()
403 {
404         if (!_have_valid_pieces) {
405                 setup_pieces ();
406         }
407
408         list<shared_ptr<Font> > fonts;
409         BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
410                 if (p->content->subtitle) {
411                         /* XXX: things may go wrong if there are duplicate font IDs
412                            with different font files.
413                         */
414                         list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
415                         copy (f.begin(), f.end(), back_inserter (fonts));
416                 }
417         }
418
419         return fonts;
420 }
421
/** Set this player never to produce any video data.
 *  NOTE(review): this only takes effect when decoders are (re)created in
 *  setup_pieces, so it should be called before the first pass()/seek() —
 *  confirm with callers.
 */
void
Player::set_ignore_video ()
{
	_ignore_video = true;
}
428
/** Set this player never to produce any subtitle data.
 *  NOTE(review): like set_ignore_video, this is applied when decoders are
 *  created in setup_pieces — confirm it is called before decoding starts.
 */
void
Player::set_ignore_subtitle ()
{
	_ignore_subtitle = true;
}
434
/** Set whether or not this player should always burn text subtitles into the image,
 *  regardless of the content settings.
 *  @param burn true to always burn subtitles, false to obey content settings.
 */
void
Player::set_always_burn_subtitles (bool burn)
{
	_always_burn_subtitles = burn;
}
444
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	_fast = true;
	/* Decoders are created with _fast (see setup_pieces) so they must be re-made */
	_have_valid_pieces = false;
}
452
/** Tell the player to decode referenced DCP content rather than skipping it;
 *  requires the pieces (and their decoders) to be re-made.
 */
void
Player::set_play_referenced ()
{
	_play_referenced = true;
	_have_valid_pieces = false;
}
459
460 list<ReferencedReelAsset>
461 Player::get_reel_assets ()
462 {
463         list<ReferencedReelAsset> a;
464
465         BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
466                 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
467                 if (!j) {
468                         continue;
469                 }
470
471                 scoped_ptr<DCPDecoder> decoder;
472                 try {
473                         decoder.reset (new DCPDecoder (j, _film->log(), false));
474                 } catch (...) {
475                         return a;
476                 }
477
478                 int64_t offset = 0;
479                 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
480
481                         DCPOMATIC_ASSERT (j->video_frame_rate ());
482                         double const cfr = j->video_frame_rate().get();
483                         Frame const trim_start = j->trim_start().frames_round (cfr);
484                         Frame const trim_end = j->trim_end().frames_round (cfr);
485                         int const ffr = _film->video_frame_rate ();
486
487                         DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
488                         if (j->reference_video ()) {
489                                 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
490                                 DCPOMATIC_ASSERT (ra);
491                                 ra->set_entry_point (ra->entry_point() + trim_start);
492                                 ra->set_duration (ra->duration() - trim_start - trim_end);
493                                 a.push_back (
494                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
495                                         );
496                         }
497
498                         if (j->reference_audio ()) {
499                                 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
500                                 DCPOMATIC_ASSERT (ra);
501                                 ra->set_entry_point (ra->entry_point() + trim_start);
502                                 ra->set_duration (ra->duration() - trim_start - trim_end);
503                                 a.push_back (
504                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
505                                         );
506                         }
507
508                         if (j->reference_subtitle ()) {
509                                 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
510                                 DCPOMATIC_ASSERT (ra);
511                                 ra->set_entry_point (ra->entry_point() + trim_start);
512                                 ra->set_duration (ra->duration() - trim_start - trim_end);
513                                 a.push_back (
514                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
515                                         );
516                         }
517
518                         /* Assume that main picture duration is the length of the reel */
519                         offset += k->main_picture()->duration ();
520                 }
521         }
522
523         return a;
524 }
525
/** Make some progress: choose whichever source (content decoder, black filler
 *  or silence filler) is farthest behind and make it emit some data, then emit
 *  any audio which is now known to be complete.
 *  @return true when there is nothing left to do (playback is finished).
 */
bool
Player::pass ()
{
	if (!_have_valid_pieces) {
		setup_pieces ();
	}

	if (_playlist->length() == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		/* Where this piece's decoder has got to, on the film timeline */
		DCPTime const t = content_time_to_dcp (i, i->decoder->position());
		if (t > i->content->end()) {
			/* Decoder has run past the end of its content; it is finished */
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with a subtitle so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->subtitle)) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What kind of thing we will advance on this pass */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Prefer the black / silence fillers if they are behind all content */
	if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
		/* Ask the decoder to emit some data; pass() returns true when it is finished */
		earliest_content->done = earliest_content->decoder->pass ();
		break;
	case BLACK:
		/* Emit one black frame and advance the black filler by a frame */
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   this silence.  Bodge the start time of the silence to fix it.  I'm
			   not sure if this is the right solution --- maybe the last thing should
			   be padded `forward' rather than this thing padding `back'.
			*/
			period.from = min(period.from, *_last_audio_time);
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _film->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Everything has finished; push out any video still waiting in the Shuffler */
		_shuffler->flush ();
	}
	return done;
}
655
656 optional<PositionImage>
657 Player::subtitles_for_frame (DCPTime time) const
658 {
659         list<PositionImage> subtitles;
660
661         int const vfr = _film->video_frame_rate();
662
663         BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_subtitles)) {
664
665                 /* Image subtitles */
666                 list<PositionImage> c = transform_image_subtitles (i.image);
667                 copy (c.begin(), c.end(), back_inserter (subtitles));
668
669                 /* Text subtitles (rendered to an image) */
670                 if (!i.text.empty ()) {
671                         list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time, vfr);
672                         copy (s.begin(), s.end(), back_inserter (subtitles));
673                 }
674         }
675
676         if (subtitles.empty ()) {
677                 return optional<PositionImage> ();
678         }
679
680         return merge (subtitles);
681 }
682
/** Handle a frame of video emitted by a piece's decoder: discard it if it is
 *  out of range, fill any gap since the last emitted video (with repeats of
 *  the previous frame or with black), then emit the new frame (repeated as
 *  required by the frame rate change).
 *  @param wp weak pointer to the piece the video came from.
 *  @param video the decoded video frame.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
	if (frc.skip && (video.frame % 2) == 1) {
		/* This frame rate change drops every other frame; skip odd ones */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}


	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end());

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());
		/* Last frame we emitted for this piece, if any; re-used to fill gaps */
		LastVideoMap::const_iterator last = _last_video.find (wp);
		if (_film->three_d()) {
			/* In 3D we must fill in eye order (L then R), starting from the
			   eye after the last one emitted.
			*/
			DCPTime j = fill_from;
			Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
			if (eyes == EYES_BOTH) {
				eyes = EYES_LEFT;
			}
			while (j < fill_to || eyes != video.eyes) {
				if (last != _last_video.end()) {
					/* Repeat the last frame for this piece, re-badged with the eye we need */
					shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
					copy->set_eyes (eyes);
					emit_video (copy, j);
				} else {
					emit_video (black_player_video_frame(eyes), j);
				}
				if (eyes == EYES_RIGHT) {
					/* Only advance time once both eyes of a frame are out */
					j += one_video_frame();
				}
				eyes = increment_eyes (eyes);
			}
		} else {
			for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
				if (last != _last_video.end()) {
					emit_video (last->second, j);
				} else {
					emit_video (black_player_video_frame(EYES_BOTH), j);
				}
			}
		}
	}

	/* Remember this frame (cropped/faded/scaled) so it can be repeated to fill future gaps */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion ()
			)
		);

	/* Emit the frame, repeated as required by the frame rate change, but never
	   beyond the end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end()) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
769
/** Handle a block of decoded audio from a piece: trim it to the period of the
 *  content, apply gain, remap channels, run any audio processor and push the
 *  result into the merger.
 *  @param wp Piece which produced this audio (ignored if it has been destroyed).
 *  @param stream Stream within the content that the audio came from.
 *  @param content_audio Audio data plus its frame number within the content.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		/* Block starts before the content: drop the part before position().
		   discard_audio returns a null buffer if nothing is left.
		*/
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end()) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end()) {
		/* Block overlaps the end of the content: keep only the frames up to end().
		   NOTE(review): the frame count here uses the film's audio rate, which is
		   presumably equal to content->resampled_frame_rate() — confirm.
		*/
		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	/* After trimming there must still be something left */
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy first so that we do not modify the decoder's buffers */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap the stream's channels into the film's channel layout */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push into the merger, and note how far this stream has got so that
	   pass() knows when it can safely pull merged audio out.
	*/

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
836
837 void
838 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
839 {
840         shared_ptr<Piece> piece = wp.lock ();
841         if (!piece) {
842                 return;
843         }
844
845         /* Apply content's subtitle offsets */
846         subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
847         subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
848
849         /* Apply content's subtitle scale */
850         subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
851         subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
852
853         /* Apply a corrective translation to keep the subtitle centred after that scale */
854         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
855         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
856
857         PlayerSubtitles ps;
858         ps.image.push_back (subtitle.sub);
859         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
860
861         _active_subtitles.add_from (wp, ps, from);
862 }
863
/** Handle the start of a text subtitle from a piece: apply the content's
 *  offset/scale settings to each string and register them as active.
 *  @param wp Piece which produced this subtitle (ignored if it has been destroyed).
 *  @param subtitle The subtitle strings and their time within the content.
 */
void
Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	PlayerSubtitles ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		/* Shift by the content's subtitle offsets */
		s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
		s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
		float const xs = piece->content->subtitle->x_scale();
		float const ys = piece->content->subtitle->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we scale size by 0.5 (the expression below
		   works out to max(xs, ys) for positive scales).
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* Only the `in' time is set here; the end of the subtitle is handled
		   by subtitle_stop().  1000 is the editorial rate for the dcp::Time.
		*/
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
		ps.add_fonts (piece->content->subtitle->fonts ());
	}

	_active_subtitles.add_from (wp, ps, from);
}
902
903 void
904 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
905 {
906         if (!_active_subtitles.have (wp)) {
907                 return;
908         }
909
910         shared_ptr<Piece> piece = wp.lock ();
911         if (!piece) {
912                 return;
913         }
914
915         DCPTime const dcp_to = content_time_to_dcp (piece, to);
916
917         pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
918
919         if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
920                 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
921         }
922 }
923
924 void
925 Player::seek (DCPTime time, bool accurate)
926 {
927         if (!_have_valid_pieces) {
928                 setup_pieces ();
929         }
930
931         if (_shuffler) {
932                 _shuffler->clear ();
933         }
934
935         if (_audio_processor) {
936                 _audio_processor->flush ();
937         }
938
939         _audio_merger.clear ();
940         _active_subtitles.clear ();
941
942         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
943                 if (time < i->content->position()) {
944                         /* Before; seek to 0 */
945                         i->decoder->seek (ContentTime(), accurate);
946                         i->done = false;
947                 } else if (i->content->position() <= time && time < i->content->end()) {
948                         /* During; seek to position */
949                         i->decoder->seek (dcp_to_content_time (i, time), accurate);
950                         i->done = false;
951                 } else {
952                         /* After; this piece is done */
953                         i->done = true;
954                 }
955         }
956
957         if (accurate) {
958                 _last_video_time = time;
959                 _last_video_eyes = EYES_LEFT;
960                 _last_audio_time = time;
961         } else {
962                 _last_video_time = optional<DCPTime>();
963                 _last_video_eyes = optional<Eyes>();
964                 _last_audio_time = optional<DCPTime>();
965         }
966
967         _black.set_position (time);
968         _silent.set_position (time);
969
970         _last_video.clear ();
971 }
972
973 void
974 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
975 {
976         optional<PositionImage> subtitles = subtitles_for_frame (time);
977         if (subtitles) {
978                 pv->set_subtitle (subtitles.get ());
979         }
980
981         Video (pv, time);
982
983         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
984                 _last_video_time = time + one_video_frame();
985                 _active_subtitles.clear_before (time);
986         }
987         _last_video_eyes = increment_eyes (pv->eyes());
988 }
989
990 void
991 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
992 {
993         /* This audio must follow on from the previous */
994         DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
995         Audio (data, time);
996         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
997 }
998
999 void
1000 Player::fill_audio (DCPTimePeriod period)
1001 {
1002         if (period.from == period.to) {
1003                 return;
1004         }
1005
1006         DCPOMATIC_ASSERT (period.from < period.to);
1007
1008         DCPTime t = period.from;
1009         while (t < period.to) {
1010                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1011                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1012                 if (samples) {
1013                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1014                         silence->make_silent ();
1015                         emit_audio (silence, t);
1016                 }
1017                 t += block;
1018         }
1019 }
1020
1021 DCPTime
1022 Player::one_video_frame () const
1023 {
1024         return DCPTime::from_frames (1, _film->video_frame_rate ());
1025 }
1026
1027 pair<shared_ptr<AudioBuffers>, DCPTime>
1028 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1029 {
1030         DCPTime const discard_time = discard_to - time;
1031         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1032         Frame remaining_frames = audio->frames() - discard_frames;
1033         if (remaining_frames <= 0) {
1034                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1035         }
1036         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1037         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1038         return make_pair(cut, time + discard_time);
1039 }
1040
1041 void
1042 Player::set_dcp_decode_reduction (optional<int> reduction)
1043 {
1044         if (reduction == _dcp_decode_reduction) {
1045                 return;
1046         }
1047
1048         _dcp_decode_reduction = reduction;
1049         _have_valid_pieces = false;
1050         Changed (false);
1051 }