Merge branch 'master' of ssh://git.carlh.net/home/carl/git/dcpomatic
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_text.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
50 #include "shuffler.h"
51 #include <dcp/reel.h>
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
57 #include <stdint.h>
58 #include <algorithm>
59 #include <iostream>
60
61 #include "i18n.h"
62
/* Convenience macro to write a general-type entry to the Film's log */
#define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
64
65 using std::list;
66 using std::cout;
67 using std::min;
68 using std::max;
69 using std::min;
70 using std::vector;
71 using std::pair;
72 using std::map;
73 using std::make_pair;
74 using std::copy;
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
80
/* Property codes passed to the Changed signal to describe what about the
   player's output has changed.  Values are arbitrary but must be distinct
   from the content property codes which are also emitted via Changed.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
86
/** Construct a Player which emits the video/audio/text of @a playlist using
 *  the settings (frame rate, container size, audio processor etc.) of @a film.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
        : _film (film)
        , _playlist (playlist)
        , _have_valid_pieces (false)
        , _ignore_video (false)
        , _ignore_text (false)
        , _always_burn_open_subtitles (false)
        , _fast (false)
        , _play_referenced (false)
        , _audio_merger (_film->audio_frame_rate())
        , _shuffler (0)
{
        /* Watch for changes which require new Pieces or which alter our output */
        _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
        _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
        _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
        set_video_container_size (_film->frame_size ());

        /* Sets up _audio_processor if the film is using one (see film_changed) */
        film_changed (Film::AUDIO_PROCESSOR);

        /* Start from the beginning; second argument presumably means `accurate' — see seek() */
        seek (DCPTime (), true);
}
108
Player::~Player ()
{
        /* _shuffler is a raw pointer owned by this Player (allocated in setup_pieces) */
        delete _shuffler;
}
113
/** Rebuild _pieces from the playlist's current content, connect each decoder's
 *  output signals to this Player, and reset playback state to the start.
 */
void
Player::setup_pieces ()
{
        _pieces.clear ();

        /* Re-create the Shuffler, which re-orders 3D L/R frames that arrive out of sequence */
        delete _shuffler;
        _shuffler = new Shuffler();
        _shuffler->Video.connect(bind(&Player::video, this, _1, _2));

        BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

                if (!i->paths_valid ()) {
                        /* Content whose files are missing cannot be decoded */
                        continue;
                }

                shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
                FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());

                if (!decoder) {
                        /* Not something that we can decode; e.g. Atmos content */
                        continue;
                }

                if (decoder->video && _ignore_video) {
                        decoder->video->set_ignore (true);
                }

                if (_ignore_text) {
                        /* NOTE: this inner `i' (a TextDecoder) shadows the Content `i' above */
                        BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
                                i->set_ignore (true);
                        }
                }

                shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
                if (dcp) {
                        dcp->set_decode_referenced (_play_referenced);
                        if (_play_referenced) {
                                dcp->set_forced_reduction (_dcp_decode_reduction);
                        }
                }

                shared_ptr<Piece> piece (new Piece (i, decoder, frc));
                _pieces.push_back (piece);

                if (decoder->video) {
                        if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
                                /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
                                decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
                        } else {
                                decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
                        }
                }

                if (decoder->audio) {
                        decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
                }

                /* Connect the start/stop signals of every text (subtitle/caption) decoder */
                list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

                while (j != decoder->text.end()) {
                        (*j)->BitmapStart.connect (
                                bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
                                );
                        (*j)->PlainStart.connect (
                                bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
                                );
                        (*j)->Stop.connect (
                                bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1, _2)
                                );

                        ++j;
                }
        }

        /* For each audio stream, record which piece it belongs to and where that piece starts */
        _stream_states.clear ();
        BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
                if (i->content->audio) {
                        BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
                                _stream_states[j] = StreamState (i, i->content->position ());
                        }
                }
        }

        /* Trackers for the gaps in the video (_black) and audio (_silent) timelines */
        _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
        _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));

        /* Reset playback positions to the start of the DCP */
        _last_video_time = DCPTime ();
        _last_video_eyes = EYES_BOTH;
        _last_audio_time = DCPTime ();
        _have_valid_pieces = true;
}
205
/** Called when a property of some Content in the playlist changes.
 *  @param w Content which changed.
 *  @param property Property code (ContentProperty / VideoContentProperty etc.).
 *  @param frequent true if the change is likely to recur rapidly (e.g. during a drag).
 */
void
Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
{
        shared_ptr<Content> c = w.lock ();
        if (!c) {
                return;
        }

        /* These properties invalidate our Pieces; they will be rebuilt on the next pass/seek */
        if (
                property == ContentProperty::POSITION ||
                property == ContentProperty::LENGTH ||
                property == ContentProperty::TRIM_START ||
                property == ContentProperty::TRIM_END ||
                property == ContentProperty::PATH ||
                property == VideoContentProperty::FRAME_TYPE ||
                property == VideoContentProperty::COLOUR_CONVERSION ||
                property == AudioContentProperty::STREAMS ||
                property == DCPContentProperty::NEEDS_ASSETS ||
                property == DCPContentProperty::NEEDS_KDM ||
                property == TextContentProperty::COLOUR ||
                property == TextContentProperty::EFFECT ||
                property == TextContentProperty::EFFECT_COLOUR ||
                property == FFmpegContentProperty::SUBTITLE_STREAM ||
                property == FFmpegContentProperty::FILTERS
                ) {

                _have_valid_pieces = false;
                Changed (property, frequent);

        } else if (
                property == TextContentProperty::LINE_SPACING ||
                property == TextContentProperty::OUTLINE_WIDTH ||
                property == TextContentProperty::Y_SCALE ||
                property == TextContentProperty::FADE_IN ||
                property == TextContentProperty::FADE_OUT ||
                property == ContentProperty::VIDEO_FRAME_RATE ||
                property == TextContentProperty::USE ||
                property == TextContentProperty::X_OFFSET ||
                property == TextContentProperty::Y_OFFSET ||
                property == TextContentProperty::X_SCALE ||
                property == TextContentProperty::FONTS ||
                property == TextContentProperty::TYPE ||
                property == VideoContentProperty::CROP ||
                property == VideoContentProperty::SCALE ||
                property == VideoContentProperty::FADE_IN ||
                property == VideoContentProperty::FADE_OUT
                ) {

                /* These change our output but the existing Pieces can be kept */
                Changed (property, frequent);
        }
}
257
258 void
259 Player::set_video_container_size (dcp::Size s)
260 {
261         if (s == _video_container_size) {
262                 return;
263         }
264
265         _video_container_size = s;
266
267         _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
268         _black_image->make_black ();
269
270         Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
271 }
272
/** Called when the playlist's content list itself changes; our Pieces are now stale */
void
Player::playlist_changed ()
{
        _have_valid_pieces = false;
        Changed (PlayerProperty::PLAYLIST, false);
}
279
280 void
281 Player::film_changed (Film::Property p)
282 {
283         /* Here we should notice Film properties that affect our output, and
284            alert listeners that our output now would be different to how it was
285            last time we were run.
286         */
287
288         if (p == Film::CONTAINER) {
289                 Changed (PlayerProperty::FILM_CONTAINER, false);
290         } else if (p == Film::VIDEO_FRAME_RATE) {
291                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
292                    so we need new pieces here.
293                 */
294                 _have_valid_pieces = false;
295                 Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
296         } else if (p == Film::AUDIO_PROCESSOR) {
297                 if (_film->audio_processor ()) {
298                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
299                 }
300         } else if (p == Film::AUDIO_CHANNELS) {
301                 _audio_merger.clear ();
302         }
303 }
304
305 list<PositionImage>
306 Player::transform_bitmap_texts (list<BitmapText> subs) const
307 {
308         list<PositionImage> all;
309
310         for (list<BitmapText>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
311                 if (!i->image) {
312                         continue;
313                 }
314
315                 /* We will scale the subtitle up to fit _video_container_size */
316                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
317
318                 all.push_back (
319                         PositionImage (
320                                 i->image->scale (
321                                         scaled_size,
322                                         dcp::YUV_TO_RGB_REC601,
323                                         i->image->pixel_format (),
324                                         true,
325                                         _fast
326                                         ),
327                                 Position<int> (
328                                         lrint (_video_container_size.width * i->rectangle.x),
329                                         lrint (_video_container_size.height * i->rectangle.y)
330                                         )
331                                 )
332                         );
333         }
334
335         return all;
336 }
337
338 shared_ptr<PlayerVideo>
339 Player::black_player_video_frame (Eyes eyes) const
340 {
341         return shared_ptr<PlayerVideo> (
342                 new PlayerVideo (
343                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
344                         Crop (),
345                         optional<double> (),
346                         _video_container_size,
347                         _video_container_size,
348                         eyes,
349                         PART_WHOLE,
350                         PresetColourConversion::all().front().conversion,
351                         boost::weak_ptr<Content>(),
352                         boost::optional<Frame>()
353                 )
354         );
355 }
356
/** @param piece Piece to examine.
 *  @param t DCP time.
 *  @return The index of the frame in @a piece's content video which corresponds to @a t.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
        /* Time from the start of the piece, clamped to its trimmed length, with trim_start added back */
        DCPTime s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(), s);
        s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

        /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
           then convert that ContentTime to frames at the content's rate.  However this fails for
           situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
           enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

           Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
        */
        return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
373
374 DCPTime
375 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
376 {
377         /* See comment in dcp_to_content_video */
378         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
379         return d + piece->content->position();
380 }
381
382 Frame
383 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
384 {
385         DCPTime s = t - piece->content->position ();
386         s = min (piece->content->length_after_trim(), s);
387         /* See notes in dcp_to_content_video */
388         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
389 }
390
391 DCPTime
392 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
393 {
394         /* See comment in dcp_to_content_video */
395         return DCPTime::from_frames (f, _film->audio_frame_rate())
396                 - DCPTime (piece->content->trim_start(), piece->frc)
397                 + piece->content->position();
398 }
399
400 ContentTime
401 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
402 {
403         DCPTime s = t - piece->content->position ();
404         s = min (piece->content->length_after_trim(), s);
405         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
406 }
407
408 DCPTime
409 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
410 {
411         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
412 }
413
414 list<shared_ptr<Font> >
415 Player::get_subtitle_fonts ()
416 {
417         if (!_have_valid_pieces) {
418                 setup_pieces ();
419         }
420
421         list<shared_ptr<Font> > fonts;
422         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
423                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
424                         /* XXX: things may go wrong if there are duplicate font IDs
425                            with different font files.
426                         */
427                         list<shared_ptr<Font> > f = j->fonts ();
428                         copy (f.begin(), f.end(), back_inserter (fonts));
429                 }
430         }
431
432         return fonts;
433 }
434
/** Set this player never to produce any video data.
 *  Takes effect on decoders created by subsequent setup_pieces() calls.
 */
void
Player::set_ignore_video ()
{
        _ignore_video = true;
}
441
/** Set this player never to produce any text (subtitle/caption) data.
 *  Takes effect on decoders created by subsequent setup_pieces() calls.
 */
void
Player::set_ignore_text ()
{
        _ignore_text = true;
}
447
/** Set the player to always burn open texts into the image regardless of the content settings.
 *  This flag is passed to get_burnt() in open_subtitles_for_frame().
 */
void
Player::set_always_burn_open_subtitles ()
{
        _always_burn_open_subtitles = true;
}
454
/** Sets up the player to be faster, possibly at the expense of quality.
 *  Pieces are invalidated so that decoders are re-created with the fast flag.
 */
void
Player::set_fast ()
{
        _fast = true;
        _have_valid_pieces = false;
}
462
/** Set the player to decode and play DCP content even when it is marked as
 *  referenced (see DCPDecoder::set_decode_referenced in setup_pieces).
 */
void
Player::set_play_referenced ()
{
        _play_referenced = true;
        _have_valid_pieces = false;
}
469
470 list<ReferencedReelAsset>
471 Player::get_reel_assets ()
472 {
473         list<ReferencedReelAsset> a;
474
475         BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
476                 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
477                 if (!j) {
478                         continue;
479                 }
480
481                 scoped_ptr<DCPDecoder> decoder;
482                 try {
483                         decoder.reset (new DCPDecoder (j, _film->log(), false));
484                 } catch (...) {
485                         return a;
486                 }
487
488                 int64_t offset = 0;
489                 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
490
491                         DCPOMATIC_ASSERT (j->video_frame_rate ());
492                         double const cfr = j->video_frame_rate().get();
493                         Frame const trim_start = j->trim_start().frames_round (cfr);
494                         Frame const trim_end = j->trim_end().frames_round (cfr);
495                         int const ffr = _film->video_frame_rate ();
496
497                         DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
498                         if (j->reference_video ()) {
499                                 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
500                                 DCPOMATIC_ASSERT (ra);
501                                 ra->set_entry_point (ra->entry_point() + trim_start);
502                                 ra->set_duration (ra->duration() - trim_start - trim_end);
503                                 a.push_back (
504                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
505                                         );
506                         }
507
508                         if (j->reference_audio ()) {
509                                 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
510                                 DCPOMATIC_ASSERT (ra);
511                                 ra->set_entry_point (ra->entry_point() + trim_start);
512                                 ra->set_duration (ra->duration() - trim_start - trim_end);
513                                 a.push_back (
514                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
515                                         );
516                         }
517
518                         if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
519                                 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
520                                 DCPOMATIC_ASSERT (ra);
521                                 ra->set_entry_point (ra->entry_point() + trim_start);
522                                 ra->set_duration (ra->duration() - trim_start - trim_end);
523                                 a.push_back (
524                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
525                                         );
526                         }
527
528                         if (j->reference_text (TEXT_CLOSED_CAPTION)) {
529                                 shared_ptr<dcp::ReelAsset> ra = k->closed_caption ();
530                                 DCPOMATIC_ASSERT (ra);
531                                 ra->set_entry_point (ra->entry_point() + trim_start);
532                                 ra->set_duration (ra->duration() - trim_start - trim_end);
533                                 a.push_back (
534                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
535                                         );
536                         }
537
538                         /* Assume that main picture duration is the length of the reel */
539                         offset += k->main_picture()->duration ();
540                 }
541         }
542
543         return a;
544 }
545
/** Run one `pass' of the player: make whichever decoder (or black/silence
 *  filler) is furthest behind emit some data, then emit any audio which is
 *  now known to be complete.
 *  @return true if the whole film has been emitted.
 */
bool
Player::pass ()
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        if (_playlist->length() == DCPTime()) {
                /* Special case of an empty Film; just give one black frame */
                emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
                return true;
        }

        /* Find the decoder or empty which is farthest behind where we are and make it emit some data */

        shared_ptr<Piece> earliest_content;
        optional<DCPTime> earliest_time;

        BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
                if (i->done) {
                        continue;
                }

                /* This piece's decoder position, expressed in DCP time */
                DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
                if (t > i->content->end()) {
                        i->done = true;
                } else {

                        /* Given two choices at the same time, pick the one with texts so we see it before
                           the video.
                        */
                        if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
                                earliest_time = t;
                                earliest_content = i;
                        }
                }
        }

        bool done = false;

        /* What kind of thing (if any) should emit data on this pass */
        enum {
                NONE,
                CONTENT,
                BLACK,
                SILENT
        } which = NONE;

        if (earliest_content) {
                which = CONTENT;
        }

        if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
                earliest_time = _black.position ();
                which = BLACK;
        }

        if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
                earliest_time = _silent.position ();
                which = SILENT;
        }

        switch (which) {
        case CONTENT:
                earliest_content->done = earliest_content->decoder->pass ();
                break;
        case BLACK:
                /* Fill a gap in the video timeline with one black frame */
                emit_video (black_player_video_frame(EYES_BOTH), _black.position());
                _black.set_position (_black.position() + one_video_frame());
                break;
        case SILENT:
        {
                DCPTimePeriod period (_silent.period_at_position());
                if (_last_audio_time) {
                        /* Sometimes the thing that happened last finishes fractionally before
                           this silence.  Bodge the start time of the silence to fix it.  I'm
                           not sure if this is the right solution --- maybe the last thing should
                           be padded `forward' rather than this thing padding `back'.
                        */
                        period.from = min(period.from, *_last_audio_time);
                }
                if (period.duration() > one_video_frame()) {
                        /* Emit at most one video frame's worth of silence per pass */
                        period.to = period.from + one_video_frame();
                }
                fill_audio (period);
                _silent.set_position (period.to);
                break;
        }
        case NONE:
                done = true;
                break;
        }

        /* Emit any audio that is ready */

        /* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
           of our streams, or the position of the _silent.
        */
        DCPTime pull_to = _film->length ();
        for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
                if (!i->second.piece->done && i->second.last_push_end < pull_to) {
                        pull_to = i->second.last_push_end;
                }
        }
        if (!_silent.done() && _silent.position() < pull_to) {
                pull_to = _silent.position();
        }

        list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
        for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
                if (_last_audio_time && i->second < *_last_audio_time) {
                        /* This new data comes before the last we emitted (or the last seek); discard it */
                        pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
                        if (!cut.first) {
                                continue;
                        }
                        *i = cut;
                } else if (_last_audio_time && i->second > *_last_audio_time) {
                        /* There's a gap between this data and the last we emitted; fill with silence */
                        fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
                }

                emit_audio (i->first, i->second);
        }

        if (done) {
                /* Flush any video still held by the Shuffler or the delay queue */
                _shuffler->flush ();
                for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
                        do_emit_video(i->first, i->second);
                }
        }

        return done;
}
679
680 list<PlayerText>
681 Player::closed_captions_for_frame (DCPTime time) const
682 {
683         return _active_texts[TEXT_CLOSED_CAPTION].get (
684                 DCPTimePeriod(time, time + DCPTime::from_frames(1, _film->video_frame_rate()))
685                 );
686 }
687
688 /** @return Open subtitles for the frame at the given time, converted to images */
689 optional<PositionImage>
690 Player::open_subtitles_for_frame (DCPTime time) const
691 {
692         list<PositionImage> captions;
693         int const vfr = _film->video_frame_rate();
694
695         BOOST_FOREACH (
696                 PlayerText j,
697                 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
698                 ) {
699
700                 /* Image subtitles */
701                 list<PositionImage> c = transform_bitmap_texts (j.image);
702                 copy (c.begin(), c.end(), back_inserter (captions));
703
704                 /* Text subtitles (rendered to an image) */
705                 if (!j.text.empty ()) {
706                         list<PositionImage> s = render_text (j.text, j.fonts, _video_container_size, time, vfr);
707                         copy (s.begin(), s.end(), back_inserter (captions));
708                 }
709         }
710
711         if (captions.empty ()) {
712                 return optional<PositionImage> ();
713         }
714
715         return merge (captions);
716 }
717
/** Handler for a frame of video arriving from a piece's decoder.
 *  Fills any gap since the last video emitted (with repeats of the previous
 *  frame, or black) and then emits this frame, repeated as required by any
 *  frame rate change.
 *  @param wp Piece that the video arrived from.
 *  @param video Video frame and its metadata.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
	if (frc.skip && (video.frame % 2) == 1) {
		/* This frame rate change drops every other frame; discard the odd-numbered ones */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end());

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());
		/* Last frame we emitted for this piece, if any; if there is one we fill with
		   repeats of it, otherwise with black.
		*/
		LastVideoMap::const_iterator last = _last_video.find (wp);
		if (_film->three_d()) {
			/* In 3D we fill eye-by-eye, starting from the eye after the last one emitted */
			DCPTime j = fill_from;
			Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
			if (eyes == EYES_BOTH) {
				eyes = EYES_LEFT;
			}
			/* Continue until we reach fill_to and the next eye to fill is the one
			   that this incoming frame supplies.
			*/
			while (j < fill_to || eyes != video.eyes) {
				if (last != _last_video.end()) {
					/* Repeat the last frame but with the eye that we need to fill */
					shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
					copy->set_eyes (eyes);
					emit_video (copy, j);
				} else {
					emit_video (black_player_video_frame(eyes), j);
				}
				if (eyes == EYES_RIGHT) {
					/* A left/right pair is complete, so move on by one frame */
					j += one_video_frame();
				}
				eyes = increment_eyes (eyes);
			}
		} else {
			for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
				if (last != _last_video.end()) {
					emit_video (last->second, j);
				} else {
					emit_video (black_player_video_frame(EYES_BOTH), j);
				}
			}
		}
	}

	/* Remember this frame so that it can be repeated to fill future gaps */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content,
			video.frame
			)
		);

	/* Emit the frame, repeated as the frame rate change requires, but never
	   beyond the end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end()) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
805
/** Handler for a block of audio arriving from a piece's decoder.  The audio is
 *  trimmed to the content's period, gained, remapped, optionally processed and
 *  then pushed to the merger.
 *  @param wp Piece that the audio arrived from.
 *  @param stream Stream within the content that the audio belongs to.
 *  @param content_audio Audio data and the frame at which it starts.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end()) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end()) {
		/* The block overlaps the end of the content; keep only the part before it */
		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy so that we don't modify the buffers that the decoder gave us */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap the stream's channels into the DCP's */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push to the merger, and note how far this stream has now been pushed */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
872
873 void
874 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
875 {
876         shared_ptr<Piece> piece = wp.lock ();
877         shared_ptr<const TextContent> text = wc.lock ();
878         if (!piece || !text) {
879                 return;
880         }
881
882         /* Apply content's subtitle offsets */
883         subtitle.sub.rectangle.x += text->x_offset ();
884         subtitle.sub.rectangle.y += text->y_offset ();
885
886         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
887         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
888         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
889
890         /* Apply content's subtitle scale */
891         subtitle.sub.rectangle.width *= text->x_scale ();
892         subtitle.sub.rectangle.height *= text->y_scale ();
893
894         PlayerText ps;
895         ps.image.push_back (subtitle.sub);
896         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
897
898         _active_texts[subtitle.type()].add_from (wc, ps, from);
899 }
900
/** Handler for the start of some plain (string-based) subtitle text arriving
 *  from a piece's decoder.  The content's offsets and scale are applied and
 *  the text is added to the appropriate list of active texts.
 *  @param wp Piece that the text arrived from.
 *  @param wc TextContent that the text belongs to.
 *  @param subtitle Subtitle strings and their timing.
 */
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	if (from > piece->content->end()) {
		/* This text starts after the end of the content's period; ignore it */
		return;
	}

	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		/* Apply content's subtitle offsets */
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling: the expression
		   below is equivalent to multiplying by max(xs, ys), so e.g. if
		   xs = ys = 0.5 the size is halved.  (The guard avoids dividing by
		   a near-zero scale.)
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* Set the subtitle's `in' time to the DCP time at which it appears */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.text.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[subtitle.type()].add_from (wc, ps, from);
}
944
945 void
946 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to, TextType type)
947 {
948         if (!_active_texts[type].have (wc)) {
949                 return;
950         }
951
952         shared_ptr<Piece> piece = wp.lock ();
953         shared_ptr<const TextContent> text = wc.lock ();
954         if (!piece || !text) {
955                 return;
956         }
957
958         DCPTime const dcp_to = content_time_to_dcp (piece, to);
959
960         if (dcp_to > piece->content->end()) {
961                 return;
962         }
963
964         pair<PlayerText, DCPTime> from = _active_texts[type].add_to (wc, dcp_to);
965
966         bool const always = (type == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
967         if (text->use() && !always && !text->burn()) {
968                 Text (from.first, type, DCPTimePeriod (from.second, dcp_to));
969         }
970 }
971
/** Seek to a given time, clearing out all buffered state so that the next
 *  things emitted come from `time' onwards.
 *  @param time DCP time to seek to.
 *  @param accurate true if the seek should be frame-accurate, in which case
 *  anything the decoders produce before `time' will be discarded.
 */
void
Player::seek (DCPTime time, bool accurate)
{
	if (!_have_valid_pieces) {
		setup_pieces ();
	}

	if (_shuffler) {
		/* Drop any out-of-order 3D video that the shuffler is holding */
		_shuffler->clear ();
	}

	/* Throw away video frames waiting in the delay buffer */
	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	/* Reset buffered audio and any active texts */
	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();
	}

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content */
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end()) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* An accurate seek: discard anything the decoders give us before `time' */
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	} else {
		/* Inaccurate: we can't say where the decoders will resume, so no discarding */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget remembered frames so that gaps after the seek are not filled with stale video */
	_last_video.clear ();
}
1024
1025 void
1026 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1027 {
1028         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1029            player before the video that requires them.
1030         */
1031         _delay.push_back (make_pair (pv, time));
1032
1033         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1034                 _last_video_time = time + one_video_frame();
1035         }
1036         _last_video_eyes = increment_eyes (pv->eyes());
1037
1038         if (_delay.size() < 3) {
1039                 return;
1040         }
1041
1042         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1043         _delay.pop_front();
1044         do_emit_video (to_do.first, to_do.second);
1045 }
1046
1047 void
1048 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1049 {
1050         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1051                 for (int i = 0; i < TEXT_COUNT; ++i) {
1052                         _active_texts[i].clear_before (time);
1053                 }
1054         }
1055
1056         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1057         if (subtitles) {
1058                 pv->set_text (subtitles.get ());
1059         }
1060
1061         Video (pv, time);
1062 }
1063
/** Emit some audio via the Audio signal, checking that it follows on directly
 *  from the audio emitted previously.
 *  @param data Audio data.
 *  @param time Time at which the audio starts.
 */
void
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
{
	/* Log if the assert below is about to fail */
	if (_last_audio_time && time != *_last_audio_time) {
		_film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
	}

	/* This audio must follow on from the previous */
	DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
	Audio (data, time);
	/* The next block must start where this one finishes */
	_last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
}
1077
1078 void
1079 Player::fill_audio (DCPTimePeriod period)
1080 {
1081         if (period.from == period.to) {
1082                 return;
1083         }
1084
1085         DCPOMATIC_ASSERT (period.from < period.to);
1086
1087         DCPTime t = period.from;
1088         while (t < period.to) {
1089                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1090                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1091                 if (samples) {
1092                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1093                         silence->make_silent ();
1094                         emit_audio (silence, t);
1095                 }
1096                 t += block;
1097         }
1098 }
1099
1100 DCPTime
1101 Player::one_video_frame () const
1102 {
1103         return DCPTime::from_frames (1, _film->video_frame_rate ());
1104 }
1105
1106 pair<shared_ptr<AudioBuffers>, DCPTime>
1107 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1108 {
1109         DCPTime const discard_time = discard_to - time;
1110         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1111         Frame remaining_frames = audio->frames() - discard_frames;
1112         if (remaining_frames <= 0) {
1113                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1114         }
1115         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1116         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1117         return make_pair(cut, time + discard_time);
1118 }
1119
1120 void
1121 Player::set_dcp_decode_reduction (optional<int> reduction)
1122 {
1123         if (reduction == _dcp_decode_reduction) {
1124                 return;
1125         }
1126
1127         _dcp_decode_reduction = reduction;
1128         _have_valid_pieces = false;
1129         Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);
1130 }
1131
1132 DCPTime
1133 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1134 {
1135         if (_have_valid_pieces) {
1136                 setup_pieces ();
1137         }
1138
1139         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1140                 if (i->content == content) {
1141                         return content_time_to_dcp (i, t);
1142                 }
1143         }
1144
1145         DCPOMATIC_ASSERT (false);
1146         return DCPTime ();
1147 }