Fix incorrect reel lengths when start-trimmed movie content follows
[dcpomatic.git] / src / lib / player.cc
/*
    Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/

#include "player.h"
#include "film.h"
#include "audio_buffers.h"
#include "content_audio.h"
#include "dcp_content.h"
#include "job.h"
#include "image.h"
#include "raw_image_proxy.h"
#include "ratio.h"
#include "log.h"
#include "render_subtitles.h"
#include "config.h"
#include "content_video.h"
#include "player_video.h"
#include "frame_rate_change.h"
#include "audio_processor.h"
#include "playlist.h"
#include "referenced_reel_asset.h"
#include "decoder_factory.h"
#include "decoder.h"
#include "video_decoder.h"
#include "audio_decoder.h"
#include "subtitle_content.h"
#include "subtitle_decoder.h"
#include "ffmpeg_content.h"
#include "audio_content.h"
#include "content_subtitle.h"
#include "dcp_decoder.h"
#include "image_decoder.h"
#include "compose.hpp"
#include <dcp/reel.h>
#include <dcp/reel_sound_asset.h>
#include <dcp/reel_subtitle_asset.h>
#include <dcp/reel_picture_asset.h>
#include <boost/foreach.hpp>
#include <stdint.h>
#include <algorithm>
#include <iostream>

#include "i18n.h"

#define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);

using std::list;
using std::cout;
using std::min;
using std::max;
using std::vector;
using std::pair;
using std::map;
using std::make_pair;
using std::copy;
using boost::shared_ptr;
using boost::weak_ptr;
using boost::dynamic_pointer_cast;
using boost::optional;
using boost::scoped_ptr;

Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
        : _film (film)
        , _playlist (playlist)
        , _have_valid_pieces (false)
        , _ignore_video (false)
        , _ignore_subtitle (false)
        , _always_burn_subtitles (false)
        , _fast (false)
        , _play_referenced (false)
        , _audio_merger (_film->audio_frame_rate())
{
        _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
        _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
        _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
        set_video_container_size (_film->frame_size ());

        film_changed (Film::AUDIO_PROCESSOR);

        seek (DCPTime (), true);
}

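/** Rebuild the list of Pieces from the playlist, creating a decoder for each bit of content
 *  and connecting its signals to the player.
 */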
void
Player::setup_pieces ()
{
        _pieces.clear ();

        BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

                if (!i->paths_valid ()) {
                        continue;
                }

                shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
                FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());

                if (!decoder) {
                        /* Not something that we can decode; e.g. Atmos content */
                        continue;
                }

                if (decoder->video && _ignore_video) {
                        decoder->video->set_ignore ();
                }

                if (decoder->subtitle && _ignore_subtitle) {
                        decoder->subtitle->set_ignore ();
                }

                shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
                if (dcp && _play_referenced) {
                        dcp->set_decode_referenced ();
                        dcp->set_forced_reduction (_dcp_decode_reduction);
                }

                shared_ptr<Piece> piece (new Piece (i, decoder, frc));
                _pieces.push_back (piece);

                if (decoder->video) {
                        decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
                }

                if (decoder->audio) {
                        decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
                }

                if (decoder->subtitle) {
                        decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
                        decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
                        decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
                }
        }

        _stream_states.clear ();
        BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
                if (i->content->audio) {
                        BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
                                _stream_states[j] = StreamState (i, i->content->position ());
                        }
                }
        }

        _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
        _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));

        _last_video_time = DCPTime ();
        _last_audio_time = DCPTime ();
        _have_valid_pieces = true;
}

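/** Called when a property of some content in the playlist has changed; decide whether the
 *  pieces need to be rebuilt and tell listeners that our output has changed.
 */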
void
Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
{
        shared_ptr<Content> c = w.lock ();
        if (!c) {
                return;
        }

        if (
                property == ContentProperty::POSITION ||
                property == ContentProperty::LENGTH ||
                property == ContentProperty::TRIM_START ||
                property == ContentProperty::TRIM_END ||
                property == ContentProperty::PATH ||
                property == VideoContentProperty::FRAME_TYPE ||
                property == DCPContentProperty::NEEDS_ASSETS ||
                property == DCPContentProperty::NEEDS_KDM ||
                property == SubtitleContentProperty::COLOUR ||
                property == SubtitleContentProperty::EFFECT ||
                property == SubtitleContentProperty::EFFECT_COLOUR ||
                property == FFmpegContentProperty::SUBTITLE_STREAM ||
                property == FFmpegContentProperty::FILTERS ||
                property == VideoContentProperty::COLOUR_CONVERSION
                ) {

                _have_valid_pieces = false;
                Changed (frequent);

        } else if (
                property == SubtitleContentProperty::LINE_SPACING ||
                property == SubtitleContentProperty::OUTLINE_WIDTH ||
                property == SubtitleContentProperty::Y_SCALE ||
                property == SubtitleContentProperty::FADE_IN ||
                property == SubtitleContentProperty::FADE_OUT ||
                property == ContentProperty::VIDEO_FRAME_RATE ||
                property == SubtitleContentProperty::USE ||
                property == SubtitleContentProperty::X_OFFSET ||
                property == SubtitleContentProperty::Y_OFFSET ||
                property == SubtitleContentProperty::X_SCALE ||
                property == SubtitleContentProperty::FONTS ||
                property == VideoContentProperty::CROP ||
                property == VideoContentProperty::SCALE ||
                property == VideoContentProperty::FADE_IN ||
                property == VideoContentProperty::FADE_OUT
                ) {

                Changed (frequent);
        }
}

void
Player::set_video_container_size (dcp::Size s)
{
        if (s == _video_container_size) {
                return;
        }

        _video_container_size = s;

        _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
        _black_image->make_black ();

        Changed (false);
}

void
Player::playlist_changed ()
{
        _have_valid_pieces = false;
        Changed (false);
}

void
Player::film_changed (Film::Property p)
{
        /* Here we should notice Film properties that affect our output, and
           alert listeners that our output now would be different to how it was
           last time we were run.
        */

        if (p == Film::CONTAINER) {
                Changed (false);
        } else if (p == Film::VIDEO_FRAME_RATE) {
                /* Pieces contain a FrameRateChange which contains the DCP frame rate,
                   so we need new pieces here.
                */
                _have_valid_pieces = false;
                Changed (false);
        } else if (p == Film::AUDIO_PROCESSOR) {
                if (_film->audio_processor ()) {
                        _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
                }
        }
}

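/** Scale image subtitles up to fit the video container and position them within it */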
list<PositionImage>
Player::transform_image_subtitles (list<ImageSubtitle> subs) const
{
        list<PositionImage> all;

        for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
                if (!i->image) {
                        continue;
                }

                /* We will scale the subtitle up to fit _video_container_size */
                dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);

                /* Then we need a corrective translation, consisting of two parts:
                 *
                 * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
                 *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
                 *
                 * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
                 *     (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
                 *     (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
                 *
                 * Combining these two translations gives these expressions.
                 */

                all.push_back (
                        PositionImage (
                                i->image->scale (
                                        scaled_size,
                                        dcp::YUV_TO_RGB_REC601,
                                        i->image->pixel_format (),
                                        true,
                                        _fast
                                        ),
                                Position<int> (
                                        lrint (_video_container_size.width * i->rectangle.x),
                                        lrint (_video_container_size.height * i->rectangle.y)
                                        )
                                )
                        );
        }

        return all;
}

shared_ptr<PlayerVideo>
Player::black_player_video_frame () const
{
        return shared_ptr<PlayerVideo> (
                new PlayerVideo (
                        shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
                        Crop (),
                        optional<double> (),
                        _video_container_size,
                        _video_container_size,
                        EYES_BOTH,
                        PART_WHOLE,
                        PresetColourConversion::all().front().conversion
                )
        );
}

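/** @return the frame index within a piece's content video which corresponds to the DCP time t */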
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
        DCPTime s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(), s);
        s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

        /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
           then convert that ContentTime to frames at the content's rate.  However this fails for
           situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
           enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

           Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
        */
        return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}

DCPTime
Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
        /* See comment in dcp_to_content_video */
        DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
        return d + piece->content->position();
}

Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
        DCPTime s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(), s);
        /* See notes in dcp_to_content_video */
        return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
}

DCPTime
Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
        /* See comment in dcp_to_content_video */
        return DCPTime::from_frames (f, _film->audio_frame_rate())
                - DCPTime (piece->content->trim_start(), piece->frc)
                + piece->content->position();
}

ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
        DCPTime s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(), s);
        return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}

DCPTime
Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
{
        return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
}

list<shared_ptr<Font> >
Player::get_subtitle_fonts ()
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        list<shared_ptr<Font> > fonts;
        BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
                if (p->content->subtitle) {
                        /* XXX: things may go wrong if there are duplicate font IDs
                           with different font files.
                        */
                        list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
                        copy (f.begin(), f.end(), back_inserter (fonts));
                }
        }

        return fonts;
}

/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
        _ignore_video = true;
}

void
Player::set_ignore_subtitle ()
{
        _ignore_subtitle = true;
}

/** Set whether or not this player should always burn text subtitles into the image,
 *  regardless of the content settings.
 *  @param burn true to always burn subtitles, false to obey content settings.
 */
void
Player::set_always_burn_subtitles (bool burn)
{
        _always_burn_subtitles = burn;
}

/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
        _fast = true;
        _have_valid_pieces = false;
}

void
Player::set_play_referenced ()
{
        _play_referenced = true;
        _have_valid_pieces = false;
}

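/** @return a list of the reel assets which will be referenced (rather than re-made) from DCP
 *  content in the playlist, each with the period of DCP time that it covers.
 */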
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
        list<ReferencedReelAsset> a;

        BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
                shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
                if (!j) {
                        continue;
                }

                scoped_ptr<DCPDecoder> decoder;
                try {
                        decoder.reset (new DCPDecoder (j, _film->log(), false));
                } catch (...) {
                        return a;
                }

                int64_t offset = 0;
                BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

                        DCPOMATIC_ASSERT (j->video_frame_rate ());
                        double const cfr = j->video_frame_rate().get();
                        Frame const trim_start = j->trim_start().frames_round (cfr);
                        Frame const trim_end = j->trim_end().frames_round (cfr);
                        int const ffr = _film->video_frame_rate ();

                        DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
                        if (j->reference_video ()) {
                                shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
                                DCPOMATIC_ASSERT (ra);
                                ra->set_entry_point (ra->entry_point() + trim_start);
                                ra->set_duration (ra->duration() - trim_start - trim_end);
                                a.push_back (
                                        ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
                                        );
                        }

                        if (j->reference_audio ()) {
                                shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
                                DCPOMATIC_ASSERT (ra);
                                ra->set_entry_point (ra->entry_point() + trim_start);
                                ra->set_duration (ra->duration() - trim_start - trim_end);
                                a.push_back (
                                        ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
                                        );
                        }

                        if (j->reference_subtitle ()) {
                                shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
                                DCPOMATIC_ASSERT (ra);
                                ra->set_entry_point (ra->entry_point() + trim_start);
                                ra->set_duration (ra->duration() - trim_start - trim_end);
                                a.push_back (
                                        ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
                                        );
                        }

                        /* Assume that main picture duration is the length of the reel */
                        offset += k->main_picture()->duration ();
                }
        }

        return a;
}

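/** Try to come up with a bit more data from whichever decoder or Empty is farthest behind,
 *  then emit any audio that is ready.
 *  @return true if there is nothing left to do.
 */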
bool
Player::pass ()
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        if (_playlist->length() == DCPTime()) {
                /* Special case of an empty Film; just give one black frame */
                emit_video (black_player_video_frame(), DCPTime());
                return true;
        }

        /* Find the decoder or empty which is farthest behind where we are and make it emit some data */

        shared_ptr<Piece> earliest_content;
        optional<DCPTime> earliest_time;

        BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
                if (i->done) {
                        continue;
                }

                DCPTime const t = content_time_to_dcp (i, i->decoder->position());
                if (t > i->content->end()) {
                        i->done = true;
                } else {
                        /* Given two choices at the same time, pick the one with a subtitle so we see it before
                           the video.
                        */
                        if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->subtitle)) {
                                earliest_time = t;
                                earliest_content = i;
                        }
                }
        }

        bool done = false;

        enum {
                NONE,
                CONTENT,
                BLACK,
                SILENT
        } which = NONE;

        if (earliest_content) {
                which = CONTENT;
        }

        if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
                earliest_time = _black.position ();
                which = BLACK;
        }

        if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
                earliest_time = _silent.position ();
                which = SILENT;
        }

        switch (which) {
        case CONTENT:
                earliest_content->done = earliest_content->decoder->pass ();
                break;
        case BLACK:
                emit_video (black_player_video_frame(), _black.position());
                _black.set_position (_black.position() + one_video_frame());
                break;
        case SILENT:
        {
                DCPTimePeriod period (_silent.period_at_position());
                if (period.duration() > one_video_frame()) {
                        period.to = period.from + one_video_frame();
                }
                fill_audio (period);
                _silent.set_position (period.to);
                break;
        }
        case NONE:
                done = true;
                break;
        }

        /* Emit any audio that is ready */

        /* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
           of our streams, or the position of the _silent.
        */
        DCPTime pull_to = _film->length ();
        for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
                if (!i->second.piece->done && i->second.last_push_end < pull_to) {
                        pull_to = i->second.last_push_end;
                }
        }
        if (!_silent.done() && _silent.position() < pull_to) {
                pull_to = _silent.position();
        }

        list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
        for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
                if (_last_audio_time && i->second < *_last_audio_time) {
                        /* This new data comes before the last we emitted (or the last seek); discard it */
                        pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
                        if (!cut.first) {
                                continue;
                        }
                        *i = cut;
                } else if (_last_audio_time && i->second > *_last_audio_time) {
                        /* There's a gap between this data and the last we emitted; fill with silence */
                        fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
                }

                emit_audio (i->first, i->second);
        }

        return done;
}

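/** @return subtitles which should be burnt into the video frame at the given time, if there are any */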
optional<PositionImage>
Player::subtitles_for_frame (DCPTime time) const
{
        list<PositionImage> subtitles;

        BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (DCPTimePeriod(time, time + DCPTime::from_frames(1, _film->video_frame_rate())), _always_burn_subtitles)) {

                /* Image subtitles */
                list<PositionImage> c = transform_image_subtitles (i.image);
                copy (c.begin(), c.end(), back_inserter (subtitles));

                /* Text subtitles (rendered to an image) */
                if (!i.text.empty ()) {
                        list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
                        copy (s.begin(), s.end(), back_inserter (subtitles));
                }
        }

        if (subtitles.empty ()) {
                return optional<PositionImage> ();
        }

        return merge (subtitles);
}

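/** Handler for video data arriving from a piece's decoder */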
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
        shared_ptr<Piece> piece = wp.lock ();
        if (!piece) {
                return;
        }

        FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
        if (frc.skip && (video.frame % 2) == 1) {
                return;
        }

        /* Time of the first frame we will emit */
        DCPTime const time = content_video_to_dcp (piece, video.frame);

        /* Discard if it's outside the content's period or if it's before the last accurate seek */
        if (
                time < piece->content->position() ||
                time >= piece->content->end() ||
                (_last_video_time && time < *_last_video_time)) {
                return;
        }

        /* Fill gaps that we discover now that we have some video which needs to be emitted */

        if (_last_video_time) {
                /* XXX: this may not work for 3D */
                DCPTime fill_from = max (*_last_video_time, piece->content->position());
                for (DCPTime j = fill_from; j < time; j += one_video_frame()) {
                        LastVideoMap::const_iterator k = _last_video.find (wp);
                        if (k != _last_video.end ()) {
                                emit_video (k->second, j);
                        } else {
                                emit_video (black_player_video_frame(), j);
                        }
                }
        }

        _last_video[wp].reset (
                new PlayerVideo (
                        video.image,
                        piece->content->video->crop (),
                        piece->content->video->fade (video.frame),
                        piece->content->video->scale().size (
                                piece->content->video, _video_container_size, _film->frame_size ()
                                ),
                        _video_container_size,
                        video.eyes,
                        video.part,
                        piece->content->video->colour_conversion ()
                        )
                );

        DCPTime t = time;
        for (int i = 0; i < frc.repeat; ++i) {
                emit_video (_last_video[wp], t);
                t += one_video_frame ();
        }
}

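/** Handler for audio data arriving from a piece's decoder; trims it to the content's period,
 *  applies gain, remapping and any audio processor, then pushes it to the audio merger.
 */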
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
        DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

        shared_ptr<Piece> piece = wp.lock ();
        if (!piece) {
                return;
        }

        shared_ptr<AudioContent> content = piece->content->audio;
        DCPOMATIC_ASSERT (content);

        /* Compute time in the DCP */
        DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
        /* And the end of this block in the DCP */
        DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());

        /* Remove anything that comes before the start or after the end of the content */
        if (time < piece->content->position()) {
                pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
                if (!cut.first) {
                        /* This audio is entirely discarded */
                        return;
                }
                content_audio.audio = cut.first;
                time = cut.second;
        } else if (time > piece->content->end()) {
                /* Discard it all */
                return;
        } else if (end > piece->content->end()) {
                Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
                if (remaining_frames == 0) {
                        return;
                }
                shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
                cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
                content_audio.audio = cut;
        }

        DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

        /* Gain */

        if (content->gain() != 0) {
                shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
                gain->apply_gain (content->gain ());
                content_audio.audio = gain;
        }

        /* Remap */

        content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

        /* Process */

        if (_audio_processor) {
                content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
        }

        /* Push */

        _audio_merger.push (content_audio.audio, time);
        DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
        _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}

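/** Handler for the start of an image subtitle arriving from a piece's decoder */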
void
Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
{
        shared_ptr<Piece> piece = wp.lock ();
        if (!piece) {
                return;
        }

        /* Apply content's subtitle offsets */
        subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
        subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();

        /* Apply content's subtitle scale */
        subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
        subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();

        /* Apply a corrective translation to keep the subtitle centred after that scale */
        subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
        subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);

        PlayerSubtitles ps;
        ps.image.push_back (subtitle.sub);
        DCPTime from (content_time_to_dcp (piece, subtitle.from()));

        _active_subtitles.add_from (wp, ps, from);
}

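/** Handler for the start of a text subtitle arriving from a piece's decoder */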
void
Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
{
        shared_ptr<Piece> piece = wp.lock ();
        if (!piece) {
                return;
        }

        PlayerSubtitles ps;
        DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

        BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
                s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
                s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
                float const xs = piece->content->subtitle->x_scale();
                float const ys = piece->content->subtitle->y_scale();
                float size = s.size();

                /* Adjust size to express the common part of the scaling;
                   e.g. if xs = ys = 0.5 we scale size by 0.5.  Any difference between
                   xs and ys is expressed by the aspect adjustment below.
                */
                if (xs > 1e-5 && ys > 1e-5) {
                        size *= 1 / min (1 / xs, 1 / ys);
                }
                s.set_size (size);

                /* Then express aspect ratio changes */
                if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
                        s.set_aspect_adjust (xs / ys);
                }

                s.set_in (dcp::Time(from.seconds(), 1000));
                ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
                ps.add_fonts (piece->content->subtitle->fonts ());
        }

        _active_subtitles.add_from (wp, ps, from);
}

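/** Handler for the end of a subtitle arriving from a piece's decoder */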
void
Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
{
        if (!_active_subtitles.have (wp)) {
                return;
        }

        shared_ptr<Piece> piece = wp.lock ();
        if (!piece) {
                return;
        }

        DCPTime const dcp_to = content_time_to_dcp (piece, to);

        pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);

        if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
                Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
        }
}

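/** Seek to a time in the DCP.
 *  @param time Time to seek to.
 *  @param accurate true to discard any data before the seek time so that output starts exactly
 *  there; false to accept output from a little before it.
 */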
void
Player::seek (DCPTime time, bool accurate)
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        if (_audio_processor) {
                _audio_processor->flush ();
        }

        _audio_merger.clear ();
        _active_subtitles.clear ();

        BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
                if (time < i->content->position()) {
                        /* Before; seek to 0 */
                        i->decoder->seek (ContentTime(), accurate);
                        i->done = false;
                } else if (i->content->position() <= time && time < i->content->end()) {
                        /* During; seek to position */
                        i->decoder->seek (dcp_to_content_time (i, time), accurate);
                        i->done = false;
                } else {
                        /* After; this piece is done */
                        i->done = true;
                }
        }

        if (accurate) {
                _last_video_time = time;
                _last_audio_time = time;
        } else {
                _last_video_time = optional<DCPTime>();
                _last_audio_time = optional<DCPTime>();
        }

        _black.set_position (time);
        _silent.set_position (time);

        _last_video.clear ();
}

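/** Burn any active subtitles into a frame and emit it via the Video signal */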
void
Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
{
        optional<PositionImage> subtitles = subtitles_for_frame (time);
        if (subtitles) {
                pv->set_subtitle (subtitles.get ());
        }

        Video (pv, time);

        if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
                _last_video_time = time + one_video_frame();
                _active_subtitles.clear_before (time);
        }
}

void
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
{
        /* This audio must follow on from the previous */
        DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
        Audio (data, time);
        _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
}

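/** Emit silence to fill a given period */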
void
Player::fill_audio (DCPTimePeriod period)
{
        if (period.from == period.to) {
                return;
        }

        DCPOMATIC_ASSERT (period.from < period.to);

        DCPTime t = period.from;
        while (t < period.to) {
                DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
                Frame const samples = block.frames_round(_film->audio_frame_rate());
                if (samples) {
                        shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
                        silence->make_silent ();
                        emit_audio (silence, t);
                }
                t += block;
        }
}

DCPTime
Player::one_video_frame () const
{
        return DCPTime::from_frames (1, _film->video_frame_rate ());
}

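/** Discard the part of some audio that comes before a given time.
 *  @return the remaining audio and its new start time, or a null pointer if nothing remains.
 */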
pair<shared_ptr<AudioBuffers>, DCPTime>
Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
{
        DCPTime const discard_time = discard_to - time;
        Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
        Frame remaining_frames = audio->frames() - discard_frames;
        if (remaining_frames <= 0) {
                return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
        }
        shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
        cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
        return make_pair(cut, time + discard_time);
}

void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
        if (reduction == _dcp_decode_reduction) {
                return;
        }

        _dcp_decode_reduction = reduction;
        _have_valid_pieces = false;
        Changed (false);
}