Untested: allow viewing of subtitles or closed captions in the preview.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_text.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "caption_content.h"
44 #include "caption_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
50 #include "shuffler.h"
51 #include <dcp/reel.h>
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <boost/foreach.hpp>
56 #include <stdint.h>
57 #include <algorithm>
58 #include <iostream>
59
60 #include "i18n.h"
61
/* Log a general-category message to the film's log.  Note that the expansion
   already ends in a semicolon, so callers' own trailing semicolons expand to
   a harmless empty statement.
*/
#define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
63
64 using std::list;
65 using std::cout;
66 using std::min;
67 using std::max;
68 using std::min;
69 using std::vector;
70 using std::pair;
71 using std::map;
72 using std::make_pair;
73 using std::copy;
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79
/* Identifiers passed to the Player's Changed signal to say what changed */
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
85
/** Construct a Player for a film/playlist pair.
 *  @param film Film to play.
 *  @param playlist Playlist to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _have_valid_pieces (false)
	, _ignore_video (false)
	, _ignore_subtitle (false)
	, _fast (false)
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	/* Watch for changes to the film, the playlist and its content so that we
	   can rebuild our pieces and/or pass changes on to our own listeners.
	*/
	_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
	_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
	_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor (if any); film_changed() handles AUDIO_PROCESSOR */
	film_changed (Film::AUDIO_PROCESSOR);

	/* Start playback from the beginning */
	seek (DCPTime (), true);
}
106
Player::~Player ()
{
	/* _shuffler is a raw pointer owned by this Player; it is (re)created in setup_pieces() */
	delete _shuffler;
}
111
/** Rebuild the list of Pieces (content + decoder + frame-rate change) from the
 *  playlist, wiring each decoder's outputs up to this Player's handler methods.
 *  Also resets the black/silence fillers and the last-emitted times, so this
 *  effectively rewinds the pipeline to zero.
 */
void
Player::setup_pieces ()
{
	_pieces.clear ();

	/* Recreate the Shuffler (used to re-order out-of-sequence 3D video) */
	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

		if (!i->paths_valid ()) {
			/* Content whose files are missing is silently skipped */
			continue;
		}

		shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
		FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());

		if (!decoder) {
			/* Not something that we can decode; e.g. Atmos content */
			continue;
		}

		/* Apply the ignore flags requested via set_ignore_video() / set_ignore_subtitle() */
		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->caption && _ignore_subtitle) {
			decoder->caption->set_ignore (true);
		}

		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		if (decoder->caption) {
			decoder->caption->BitmapStart.connect (bind (&Player::bitmap_text_start, this, weak_ptr<Piece> (piece), _1));
			decoder->caption->PlainStart.connect (bind (&Player::plain_text_start, this, weak_ptr<Piece> (piece), _1));
			decoder->caption->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1, _2));
		}
	}

	/* Record, for each audio stream, the piece it belongs to and how far its audio has been pushed */
	_stream_states.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Trackers for the periods of the film which have no video / no audio;
	   pass() fills these with black frames / silence.
	*/
	_black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
	_silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));

	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();
	_have_valid_pieces = true;
}
191
/** Handler for a change to some Content in the playlist.
 *  @param w Content that changed.
 *  @param property Identifier of the property that changed.
 *  @param frequent true if this change is likely to happen frequently (passed on to our Changed signal).
 */
void
Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
{
	shared_ptr<Content> c = w.lock ();
	if (!c) {
		/* Content has already been destroyed; nothing to do */
		return;
	}

	/* Properties in this first group affect how our pieces are set up
	   (positions, trims, decoders, streams...), so the pieces must be rebuilt
	   as well as our listeners told.
	*/
	if (
		property == ContentProperty::POSITION ||
		property == ContentProperty::LENGTH ||
		property == ContentProperty::TRIM_START ||
		property == ContentProperty::TRIM_END ||
		property == ContentProperty::PATH ||
		property == VideoContentProperty::FRAME_TYPE ||
		property == VideoContentProperty::COLOUR_CONVERSION ||
		property == AudioContentProperty::STREAMS ||
		property == DCPContentProperty::NEEDS_ASSETS ||
		property == DCPContentProperty::NEEDS_KDM ||
		property == CaptionContentProperty::COLOUR ||
		property == CaptionContentProperty::EFFECT ||
		property == CaptionContentProperty::EFFECT_COLOUR ||
		property == FFmpegContentProperty::SUBTITLE_STREAM ||
		property == FFmpegContentProperty::FILTERS
		) {

		_have_valid_pieces = false;
		Changed (property, frequent);

	/* Properties in this second group only change how content is rendered, so
	   the pieces can stay as they are; we just tell our listeners.
	*/
	} else if (
		property == CaptionContentProperty::LINE_SPACING ||
		property == CaptionContentProperty::OUTLINE_WIDTH ||
		property == CaptionContentProperty::Y_SCALE ||
		property == CaptionContentProperty::FADE_IN ||
		property == CaptionContentProperty::FADE_OUT ||
		property == ContentProperty::VIDEO_FRAME_RATE ||
		property == CaptionContentProperty::USE ||
		property == CaptionContentProperty::X_OFFSET ||
		property == CaptionContentProperty::Y_OFFSET ||
		property == CaptionContentProperty::X_SCALE ||
		property == CaptionContentProperty::FONTS ||
		property == VideoContentProperty::CROP ||
		property == VideoContentProperty::SCALE ||
		property == VideoContentProperty::FADE_IN ||
		property == VideoContentProperty::FADE_OUT
		) {

		Changed (property, frequent);
	}
}
242
243 void
244 Player::set_video_container_size (dcp::Size s)
245 {
246         if (s == _video_container_size) {
247                 return;
248         }
249
250         _video_container_size = s;
251
252         _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
253         _black_image->make_black ();
254
255         Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
256 }
257
/** Handler for a wholesale change to the playlist: our pieces are now stale.
 *  The invalidation happens before the signal is emitted so that any re-entrant
 *  use of the Player sees fresh pieces.
 */
void
Player::playlist_changed ()
{
	_have_valid_pieces = false;
	Changed (PlayerProperty::PLAYLIST, false);
}
264
265 void
266 Player::film_changed (Film::Property p)
267 {
268         /* Here we should notice Film properties that affect our output, and
269            alert listeners that our output now would be different to how it was
270            last time we were run.
271         */
272
273         if (p == Film::CONTAINER) {
274                 Changed (PlayerProperty::FILM_CONTAINER, false);
275         } else if (p == Film::VIDEO_FRAME_RATE) {
276                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
277                    so we need new pieces here.
278                 */
279                 _have_valid_pieces = false;
280                 Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
281         } else if (p == Film::AUDIO_PROCESSOR) {
282                 if (_film->audio_processor ()) {
283                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
284                 }
285         } else if (p == Film::AUDIO_CHANNELS) {
286                 _audio_merger.clear ();
287         }
288 }
289
290 list<PositionImage>
291 Player::transform_bitmap_captions (list<BitmapCaption> subs) const
292 {
293         list<PositionImage> all;
294
295         for (list<BitmapCaption>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
296                 if (!i->image) {
297                         continue;
298                 }
299
300                 /* We will scale the subtitle up to fit _video_container_size */
301                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
302
303                 all.push_back (
304                         PositionImage (
305                                 i->image->scale (
306                                         scaled_size,
307                                         dcp::YUV_TO_RGB_REC601,
308                                         i->image->pixel_format (),
309                                         true,
310                                         _fast
311                                         ),
312                                 Position<int> (
313                                         lrint (_video_container_size.width * i->rectangle.x),
314                                         lrint (_video_container_size.height * i->rectangle.y)
315                                         )
316                                 )
317                         );
318         }
319
320         return all;
321 }
322
323 shared_ptr<PlayerVideo>
324 Player::black_player_video_frame (Eyes eyes) const
325 {
326         return shared_ptr<PlayerVideo> (
327                 new PlayerVideo (
328                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
329                         Crop (),
330                         optional<double> (),
331                         _video_container_size,
332                         _video_container_size,
333                         eyes,
334                         PART_WHOLE,
335                         PresetColourConversion::all().front().conversion,
336                         boost::weak_ptr<Content>(),
337                         boost::optional<Frame>()
338                 )
339         );
340 }
341
/** Convert a DCP time to a video frame index within a piece's content,
 *  taking the content's position, start trim and skip/repeat into account.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	/* Time relative to the start of the (placed, trimmed) content,
	   clamped to its trimmed length and offset by the start trim.
	*/
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
358
/** Inverse of dcp_to_content_video: convert a content video frame index to a DCP time */
DCPTime
Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
	/* See comment in dcp_to_content_video */
	DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
	return d + piece->content->position();
}
366
/** Convert a DCP time to a frame count at the film's audio sample rate,
 *  measured from the start of the piece's (untrimmed) content.
 */
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
}
375
/** Inverse of dcp_to_resampled_audio: convert a resampled-audio frame count to a DCP time */
DCPTime
Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
	/* See comment in dcp_to_content_video */
	return DCPTime::from_frames (f, _film->audio_frame_rate())
		- DCPTime (piece->content->trim_start(), piece->frc)
		+ piece->content->position();
}
384
/** Convert a DCP time to a ContentTime within a piece, accounting for its position and start trim */
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
392
/** Inverse of dcp_to_content_time: convert a ContentTime within a piece to a DCP time (clamped to zero) */
DCPTime
Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
{
	return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
}
398
399 list<shared_ptr<Font> >
400 Player::get_subtitle_fonts ()
401 {
402         if (!_have_valid_pieces) {
403                 setup_pieces ();
404         }
405
406         list<shared_ptr<Font> > fonts;
407         BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
408                 if (p->content->caption) {
409                         /* XXX: things may go wrong if there are duplicate font IDs
410                            with different font files.
411                         */
412                         list<shared_ptr<Font> > f = p->content->caption->fonts ();
413                         copy (f.begin(), f.end(), back_inserter (fonts));
414                 }
415         }
416
417         return fonts;
418 }
419
420 /** Set this player never to produce any video data */
421 void
422 Player::set_ignore_video ()
423 {
424         _ignore_video = true;
425 }
426
427 void
428 Player::set_ignore_subtitle ()
429 {
430         _ignore_subtitle = true;
431 }
432
/** Set a type of caption that this player should always burn into the image,
 *  regardless of the content settings.
 *  @param type type of captions to burn.
 */
void
Player::set_always_burn_captions (CaptionType type)
{
	/* Stored as an optional; captions_for_frame() checks it when deciding what to burn */
	_always_burn_captions = type;
}
442
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	_fast = true;
	/* _fast is passed to decoder_factory() and to image scaling, so the pieces must be rebuilt */
	_have_valid_pieces = false;
}
450
/** Ask the player to decode and play content which would otherwise be
 *  referenced from an existing DCP.
 */
void
Player::set_play_referenced ()
{
	_play_referenced = true;
	/* The flag is applied to DCP decoders in setup_pieces(), so the pieces must be rebuilt */
	_have_valid_pieces = false;
}
457
/** @return Assets from the playlist's DCP content which are to be referenced
 *  (rather than re-encoded), together with the periods of DCP time that they
 *  occupy.  If a DCPDecoder cannot be constructed for some content the list
 *  gathered so far is returned, i.e. the failure is silent.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			/* Only DCP content can be referenced */
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (j, _film->log(), false));
		} catch (...) {
			return a;
		}

		/* Offset, in frames, of the current reel from the start of this content */
		int64_t offset = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			DCPOMATIC_ASSERT (j->video_frame_rate ());
			double const cfr = j->video_frame_rate().get();
			/* Content's trims expressed in its own video frames */
			Frame const trim_start = j->trim_start().frames_round (cfr);
			Frame const trim_end = j->trim_end().frames_round (cfr);
			int const ffr = _film->video_frame_rate ();

			/* DCP time at which this reel starts */
			DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
			if (j->reference_video ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
				DCPOMATIC_ASSERT (ra);
				/* Apply the content's trims to the asset's entry point and duration */
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_audio ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_subtitle ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			/* Assume that main picture duration is the length of the reel */
			offset += k->main_picture()->duration ();
		}
	}

	return a;
}
523
/** Make some progress: ask whichever decoder (or black/silence filler) is
 *  farthest behind to emit some data, then emit any audio which is now known
 *  to be complete.
 *  @return true if there is nothing left to do (playback has finished).
 */
bool
Player::pass ()
{
	if (!_have_valid_pieces) {
		setup_pieces ();
	}

	if (_playlist->length() == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		/* Where this piece's decoder has got to, in DCP time */
		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end()) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with a subtitle so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->caption)) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What kind of thing to emit next */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Black or silence filling takes precedence if it is behind all content */
	if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
		earliest_content->done = earliest_content->decoder->pass ();
		break;
	case BLACK:
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   this silence.  Bodge the start time of the silence to fix it.  I'm
			   not sure if this is the right solution --- maybe the last thing should
			   be padded `forward' rather than this thing padding `back'.
			*/
			period.from = min(period.from, *_last_audio_time);
		}
		/* Fill at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _film->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* At the end: push any 3D frames still waiting in the Shuffler
		   through, and emit any video that was being held back.
		*/
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
657
/** @param time DCP time of the frame in question.
 *  @return All captions (bitmap, plus text rendered to images) which should be
 *  burnt into the frame at @p time, merged into one image; nothing if there
 *  are none.
 */
optional<PositionImage>
Player::captions_for_frame (DCPTime time) const
{
	list<PositionImage> captions;

	int const vfr = _film->video_frame_rate();

	for (int i = 0; i < CAPTION_COUNT; ++i) {
		/* Captions of this type are burnt in if set_always_burn_captions() asked for it */
		bool const always = _always_burn_captions && *_always_burn_captions == i;
		BOOST_FOREACH (
			PlayerCaption j,
			_active_captions[i].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), always)
			) {

			/* Image subtitles */
			list<PositionImage> c = transform_bitmap_captions (j.image);
			copy (c.begin(), c.end(), back_inserter (captions));

			/* Text subtitles (rendered to an image) */
			if (!j.text.empty ()) {
				list<PositionImage> s = render_text (j.text, j.fonts, _video_container_size, time, vfr);
				copy (s.begin(), s.end(), back_inserter (captions));
			}
		}
	}

	if (captions.empty ()) {
		return optional<PositionImage> ();
	}

	return merge (captions);
}
690
/** Handler for video data arriving from a piece's decoder (possibly via the
 *  Shuffler, for out-of-order 3D content).  Emits the frame(s), first filling
 *  any gap since the last emitted video with repeats of the previous frame or
 *  with black.
 *  @param wp Piece that the data comes from.
 *  @param video The video data.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	/* When skipping (content faster than DCP) drop every other frame */
	FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
	if (frc.skip && (video.frame % 2) == 1) {
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end());

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());
		LastVideoMap::const_iterator last = _last_video.find (wp);
		if (_film->three_d()) {
			/* In 3D we must fill whole frames (left + right eye), starting from
			   whichever eye comes after the last one emitted.
			*/
			DCPTime j = fill_from;
			Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
			if (eyes == EYES_BOTH) {
				eyes = EYES_LEFT;
			}
			while (j < fill_to || eyes != video.eyes) {
				if (last != _last_video.end()) {
					/* Repeat the last frame we had from this piece */
					shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
					copy->set_eyes (eyes);
					emit_video (copy, j);
				} else {
					emit_video (black_player_video_frame(eyes), j);
				}
				if (eyes == EYES_RIGHT) {
					/* Only advance time once both eyes have been emitted */
					j += one_video_frame();
				}
				eyes = increment_eyes (eyes);
			}
		} else {
			for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
				if (last != _last_video.end()) {
					emit_video (last->second, j);
				} else {
					emit_video (black_player_video_frame(EYES_BOTH), j);
				}
			}
		}
	}

	/* Remember this frame so that it can be repeated if we need to fill a later gap */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content,
			video.frame
			)
		);

	/* Emit the frame, repeated as required by the frame rate change, but not
	   beyond the end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end()) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
778
/** Handle a block of audio arriving from a decoder.
 *  The block is trimmed to the content's period, has gain applied, is remapped
 *  to the film's channels, optionally processed, and then pushed to the merger.
 *  @param wp Piece that the audio came from.
 *  @param stream Audio stream within the piece.
 *  @param content_audio Audio data and the frame (within the content) at which it starts.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end()) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end()) {
		/* The block overlaps the end of the content; keep only the part before the end */
		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy before applying gain so that the decoder's buffers are not modified */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	/* Record where this stream's pushed audio now ends */
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
845
846 void
847 Player::bitmap_text_start (weak_ptr<Piece> wp, ContentBitmapCaption subtitle)
848 {
849         shared_ptr<Piece> piece = wp.lock ();
850         if (!piece) {
851                 return;
852         }
853
854         /* Apply content's subtitle offsets */
855         subtitle.sub.rectangle.x += piece->content->caption->x_offset ();
856         subtitle.sub.rectangle.y += piece->content->caption->y_offset ();
857
858         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
859         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((piece->content->caption->x_scale() - 1) / 2);
860         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((piece->content->caption->y_scale() - 1) / 2);
861
862         /* Apply content's subtitle scale */
863         subtitle.sub.rectangle.width *= piece->content->caption->x_scale ();
864         subtitle.sub.rectangle.height *= piece->content->caption->y_scale ();
865
866         PlayerCaption ps;
867         ps.image.push_back (subtitle.sub);
868         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
869
870         _active_captions[subtitle.type()].add_from (wp, ps, from);
871 }
872
873 void
874 Player::plain_text_start (weak_ptr<Piece> wp, ContentTextCaption subtitle)
875 {
876         shared_ptr<Piece> piece = wp.lock ();
877         if (!piece) {
878                 return;
879         }
880
881         PlayerCaption ps;
882         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
883
884         if (from > piece->content->end()) {
885                 return;
886         }
887
888         BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
889                 s.set_h_position (s.h_position() + piece->content->caption->x_offset ());
890                 s.set_v_position (s.v_position() + piece->content->caption->y_offset ());
891                 float const xs = piece->content->caption->x_scale();
892                 float const ys = piece->content->caption->y_scale();
893                 float size = s.size();
894
895                 /* Adjust size to express the common part of the scaling;
896                    e.g. if xs = ys = 0.5 we scale size by 2.
897                 */
898                 if (xs > 1e-5 && ys > 1e-5) {
899                         size *= 1 / min (1 / xs, 1 / ys);
900                 }
901                 s.set_size (size);
902
903                 /* Then express aspect ratio changes */
904                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
905                         s.set_aspect_adjust (xs / ys);
906                 }
907
908                 s.set_in (dcp::Time(from.seconds(), 1000));
909                 ps.text.push_back (TextCaption (s, piece->content->caption->outline_width()));
910                 ps.add_fonts (piece->content->caption->fonts ());
911         }
912
913         _active_captions[subtitle.type()].add_from (wp, ps, from);
914 }
915
916 void
917 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to, CaptionType type)
918 {
919         if (!_active_captions[type].have (wp)) {
920                 return;
921         }
922
923         shared_ptr<Piece> piece = wp.lock ();
924         if (!piece) {
925                 return;
926         }
927
928         DCPTime const dcp_to = content_time_to_dcp (piece, to);
929
930         if (dcp_to > piece->content->end()) {
931                 return;
932         }
933
934         pair<PlayerCaption, DCPTime> from = _active_captions[type].add_to (wp, dcp_to);
935
936         bool const always = _always_burn_captions && *_always_burn_captions == type;
937         if (piece->content->caption->use() && !always && !piece->content->caption->burn()) {
938                 Caption (from.first, type, DCPTimePeriod (from.second, dcp_to));
939         }
940 }
941
/** Seek so that subsequent output starts at (or, if not accurate, near) `time'.
 *  @param time Seek target in DCP time.
 *  @param accurate true to seek to exactly `time'; false to accept whatever
 *  the decoders can conveniently provide.
 */
void
Player::seek (DCPTime time, bool accurate)
{
	if (!_have_valid_pieces) {
		setup_pieces ();
	}

	if (_shuffler) {
		/* Discard anything the shuffler is holding */
		_shuffler->clear ();
	}

	/* Drop the video frames being delayed in emit_video() */
	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	/* Forget all in-progress captions */
	for (int i = 0; i < CAPTION_COUNT; ++i) {
		_active_captions[i].clear ();
	}

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content */
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end()) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* We know exactly when the next output should occur */
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	} else {
		/* We don't know where the decoders will resume, so leave these unset */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget the frames we were keeping for gap-filling */
	_last_video.clear ();
}
994
995 void
996 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
997 {
998         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
999            player before the video that requires them.
1000         */
1001         _delay.push_back (make_pair (pv, time));
1002
1003         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1004                 _last_video_time = time + one_video_frame();
1005         }
1006         _last_video_eyes = increment_eyes (pv->eyes());
1007
1008         if (_delay.size() < 3) {
1009                 return;
1010         }
1011
1012         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1013         _delay.pop_front();
1014         do_emit_video (to_do.first, to_do.second);
1015 }
1016
1017 void
1018 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1019 {
1020         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1021                 for (int i = 0; i < CAPTION_COUNT; ++i) {
1022                         _active_captions[i].clear_before (time);
1023                 }
1024         }
1025
1026         optional<PositionImage> captions = captions_for_frame (time);
1027         if (captions) {
1028                 pv->set_caption (captions.get ());
1029         }
1030
1031         Video (pv, time);
1032 }
1033
/** Emit some audio, checking that it follows on exactly from whatever was
 *  emitted previously.
 *  @param data Audio data.
 *  @param time Time in the DCP at which the data starts.
 */
void
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
{
	/* Log if the assert below is about to fail */
	if (_last_audio_time && time != *_last_audio_time) {
		_film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
	}

	/* This audio must follow on from the previous */
	DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
	Audio (data, time);
	/* The next emission must start where this one finishes */
	_last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
}
1047
1048 void
1049 Player::fill_audio (DCPTimePeriod period)
1050 {
1051         if (period.from == period.to) {
1052                 return;
1053         }
1054
1055         DCPOMATIC_ASSERT (period.from < period.to);
1056
1057         DCPTime t = period.from;
1058         while (t < period.to) {
1059                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1060                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1061                 if (samples) {
1062                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1063                         silence->make_silent ();
1064                         emit_audio (silence, t);
1065                 }
1066                 t += block;
1067         }
1068 }
1069
/** @return The duration of one video frame at the film's video frame rate */
DCPTime
Player::one_video_frame () const
{
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}
1075
1076 pair<shared_ptr<AudioBuffers>, DCPTime>
1077 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1078 {
1079         DCPTime const discard_time = discard_to - time;
1080         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1081         Frame remaining_frames = audio->frames() - discard_frames;
1082         if (remaining_frames <= 0) {
1083                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1084         }
1085         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1086         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1087         return make_pair(cut, time + discard_time);
1088 }
1089
1090 void
1091 Player::set_dcp_decode_reduction (optional<int> reduction)
1092 {
1093         if (reduction == _dcp_decode_reduction) {
1094                 return;
1095         }
1096
1097         _dcp_decode_reduction = reduction;
1098         _have_valid_pieces = false;
1099         Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);
1100 }
1101
1102 DCPTime
1103 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1104 {
1105         if (_have_valid_pieces) {
1106                 setup_pieces ();
1107         }
1108
1109         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1110                 if (i->content == content) {
1111                         return content_time_to_dcp (i, t);
1112                 }
1113         }
1114
1115         DCPOMATIC_ASSERT (false);
1116         return DCPTime ();
1117 }