91d4297086ad152ed33f9be9d5ed622c7c31a929
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "player.h"
24 #include "film.h"
25 #include "audio_buffers.h"
26 #include "content_audio.h"
27 #include "dcp_content.h"
28 #include "dcpomatic_log.h"
29 #include "job.h"
30 #include "image.h"
31 #include "raw_image_proxy.h"
32 #include "ratio.h"
33 #include "log.h"
34 #include "render_text.h"
35 #include "config.h"
36 #include "content_video.h"
37 #include "player_video.h"
38 #include "frame_rate_change.h"
39 #include "audio_processor.h"
40 #include "playlist.h"
41 #include "referenced_reel_asset.h"
42 #include "decoder_factory.h"
43 #include "decoder.h"
44 #include "video_decoder.h"
45 #include "audio_decoder.h"
46 #include "text_content.h"
47 #include "text_decoder.h"
48 #include "ffmpeg_content.h"
49 #include "audio_content.h"
50 #include "dcp_decoder.h"
51 #include "image_decoder.h"
52 #include "compose.hpp"
53 #include "shuffler.h"
54 #include "timer.h"
55 #include <dcp/reel.h>
56 #include <dcp/reel_sound_asset.h>
57 #include <dcp/reel_subtitle_asset.h>
58 #include <dcp/reel_picture_asset.h>
59 #include <dcp/reel_closed_caption_asset.h>
60 #include <stdint.h>
61 #include <algorithm>
62 #include <iostream>
63
64 #include "i18n.h"
65
66
67 using std::copy;
68 using std::cout;
69 using std::dynamic_pointer_cast;
70 using std::list;
71 using std::make_pair;
72 using std::make_shared;
73 using std::map;
74 using std::max;
75 using std::min;
76 using std::min;
77 using std::pair;
78 using std::shared_ptr;
79 using std::vector;
80 using std::weak_ptr;
81 using std::make_shared;
82 using boost::optional;
83 using boost::scoped_ptr;
84 #if BOOST_VERSION >= 106100
85 using namespace boost::placeholders;
86 #endif
87 using namespace dcpomatic;
88
89
/* Property identifiers passed with the Change signal emitted by Player,
   so observers can tell what aspect of our output has changed. */
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
96
97
/** Construct a Player which plays the film's own playlist.
 *  @param film Film to play.
 */
Player::Player (shared_ptr<const Film> film)
	: _film (film)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
106
107
/** Construct a Player which plays a given playlist rather than the film's own.
 *  @param film Film that the playlist's content belongs to.
 *  @param playlist_ Playlist to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
117
118
/** Shared constructor body: connect to film/playlist change signals,
 *  set up initial state and seek to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor (if any) via the normal change path */
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}
135
136
Player::~Player ()
{
	/* _shuffler is an owning raw pointer, re-created in setup_pieces_unlocked() */
	delete _shuffler;
}
141
142
143 void
144 Player::setup_pieces ()
145 {
146         boost::mutex::scoped_lock lm (_mutex);
147         setup_pieces_unlocked ();
148 }
149
150
151 bool
152 have_video (shared_ptr<const Content> content)
153 {
154         return static_cast<bool>(content->video) && content->video->use();
155 }
156
157
158 bool
159 have_audio (shared_ptr<const Content> content)
160 {
161         return static_cast<bool>(content->audio);
162 }
163
164
/** Rebuild _pieces from the playlist: create (or re-use) a decoder for each
 *  piece of content, wire up the decoders' emission signals, and recompute
 *  the periods which need filling with black/silence.
 *  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so their decoders can be re-used below */
	auto old_pieces = _pieces;
	_pieces.clear ();

	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	for (auto i: playlist()->content()) {

		if (!i->paths_valid ()) {
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Re-use the decoder from an old piece for the same content, if there is one */
		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			auto decoder = j->decoder_for(i);
			if (decoder) {
				old_decoder = decoder;
				break;
			}
		}

		auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			for (auto i: decoder->text) {
				i->set_ignore (true);
			}
		}

		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(i, decoder, frc);
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind(&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Connect all three text signals for each of the decoder's text streams */
		auto j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
		if ((*i)->use_video() && (*i)->video_frame_type() != VideoFrameType::THREE_D_LEFT && (*i)->video_frame_type() != VideoFrameType::THREE_D_RIGHT) {
			/* Look for content later in the content list with in-use video that overlaps this */
			auto period = DCPTimePeriod((*i)->position(), (*i)->end(_film));
			auto j = i;
			++j;
			for (; j != _pieces.end(); ++j) {
				if ((*j)->use_video()) {
					/* NOTE(review): each later overlapping piece overwrites
					   ignore_video, so only the last one's overlap survives
					   when several pieces overlap this one -- confirm intended.
					*/
					(*i)->ignore_video = DCPTimePeriod((*j)->position(), (*j)->end(_film)).overlap(period);
				}
			}
		}
	}

	/* Periods of the playlist with no video/audio; pass() fills these with black/silence */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	/* Reset our record of what we last emitted */
	_last_video_time = boost::optional<dcpomatic::DCPTime>();
	_last_video_eyes = Eyes::BOTH;
	_last_audio_time = boost::optional<dcpomatic::DCPTime>();
}
282
283
284 optional<DCPTime>
285 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
286 {
287         boost::mutex::scoped_lock lm (_mutex);
288
289         for (auto i: _pieces) {
290                 auto dcp = i->content_time_to_dcp(content, t);
291                 if (dcp) {
292                         return *dcp;
293                 }
294         }
295
296         /* We couldn't find this content; perhaps things are being changed over */
297         return {};
298 }
299
300
/** Handler for a change to some content in our playlist.
 *  @param type PENDING, DONE or CANCELLED.
 *  @param property Property of the content which changed.
 *  @param frequent true if this is one of a rapid series of changes.
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (property == VideoContentProperty::CROP) {
		if (type == ChangeType::DONE) {
			auto const vcs = video_container_size();
			boost::mutex::scoped_lock lm (_mutex);
			/* Crop affects frame metadata, so fix up any frames we are holding in the delay queue */
			for (auto const& i: _delay) {
				i.first->reset_metadata (_film, vcs);
			}
		}
	} else {
		if (type == ChangeType::PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces.  Stop pass()
			   and seek() from working until then.
			*/
			++_suspended;
		} else if (type == ChangeType::DONE) {
			/* A change in our content has gone through.  Re-build our pieces. */
			setup_pieces ();
			--_suspended;
		} else if (type == ChangeType::CANCELLED) {
			--_suspended;
		}
	}

	/* Pass the change on to our own observers */
	Change (type, property, frequent);
}
330
331
/** Set the size of the container that output video will be fitted to,
 *  emitting PENDING then DONE (or CANCELLED if the size is unchanged).
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No change; release the lock before emitting the signal */
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Remake the black filler image at the new size */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
354
355
/** Handler for a change to our playlist; rebuild pieces when it completes
 *  and forward the change to our own observers.
 */
void
Player::playlist_change (ChangeType type)
{
	if (type == ChangeType::DONE) {
		setup_pieces ();
	}
	Change (type, PlayerProperty::PLAYLIST, false);
}
364
365
/** Handler for a change to a Film property.
 *  @param type PENDING, DONE or CANCELLED.
 *  @param p Property which changed.
 */
void
Player::film_change (ChangeType type, Film::Property p)
{
	/* Here we should notice Film properties that affect our output, and
	   alert listeners that our output now would be different to how it was
	   last time we were run.
	*/

	if (p == Film::Property::CONTAINER) {
		Change (type, PlayerProperty::FILM_CONTAINER, false);
	} else if (p == Film::Property::VIDEO_FRAME_RATE) {
		/* Pieces contain a FrameRateChange which contains the DCP frame rate,
		   so we need new pieces here.
		*/
		if (type == ChangeType::DONE) {
			setup_pieces ();
		}
		Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
	} else if (p == Film::Property::AUDIO_PROCESSOR) {
		if (type == ChangeType::DONE && _film->audio_processor ()) {
			/* Take our own clone of the processor, set up for the film's audio rate */
			boost::mutex::scoped_lock lm (_mutex);
			_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
		}
	} else if (p == Film::Property::AUDIO_CHANNELS) {
		if (type == ChangeType::DONE) {
			/* Channel count changed; any audio already merged is now stale */
			boost::mutex::scoped_lock lm (_mutex);
			_audio_merger.clear ();
		}
	}
}
396
397
/** @return A frame of black, sized to the video container, for the given eyes */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		Crop(),
		optional<double>(),
		_video_container_size,
		_video_container_size,
		eyes,
		Part::WHOLE,
		PresetColourConversion::all().front().conversion,
		VideoRange::FULL,
		std::weak_ptr<Content>(),
		boost::optional<Frame>(),
		false
	);
}
416
417
418 vector<FontData>
419 Player::get_subtitle_fonts ()
420 {
421         boost::mutex::scoped_lock lm (_mutex);
422
423         vector<FontData> fonts;
424         for (auto i: _pieces) {
425                 /* XXX: things may go wrong if there are duplicate font IDs
426                    with different font files.
427                 */
428                 auto f = i->decoder->fonts ();
429                 copy (f.begin(), f.end(), back_inserter(fonts));
430         }
431
432         return fonts;
433 }
434
435
436 /** Set this player never to produce any video data */
437 void
438 Player::set_ignore_video ()
439 {
440         boost::mutex::scoped_lock lm (_mutex);
441         _ignore_video = true;
442         setup_pieces_unlocked ();
443 }
444
445
446 void
447 Player::set_ignore_audio ()
448 {
449         boost::mutex::scoped_lock lm (_mutex);
450         _ignore_audio = true;
451         setup_pieces_unlocked ();
452 }
453
454
455 void
456 Player::set_ignore_text ()
457 {
458         boost::mutex::scoped_lock lm (_mutex);
459         _ignore_text = true;
460         setup_pieces_unlocked ();
461 }
462
463
464 /** Set the player to always burn open texts into the image regardless of the content settings */
465 void
466 Player::set_always_burn_open_subtitles ()
467 {
468         boost::mutex::scoped_lock lm (_mutex);
469         _always_burn_open_subtitles = true;
470 }
471
472
473 /** Sets up the player to be faster, possibly at the expense of quality */
474 void
475 Player::set_fast ()
476 {
477         boost::mutex::scoped_lock lm (_mutex);
478         _fast = true;
479         setup_pieces_unlocked ();
480 }
481
482
483 void
484 Player::set_play_referenced ()
485 {
486         boost::mutex::scoped_lock lm (_mutex);
487         _play_referenced = true;
488         setup_pieces_unlocked ();
489 }
490
491
/** Trim a reel asset by adjusting its entry point and duration, then add it
 *  (with the DCP period it covers) to a list if anything remains after trimming.
 *  @param a List to add to.
 *  @param r Asset; note that its entry point and duration are modified in
 *  place even if it ends up not being added.
 *  @param reel_trim_start Frames to trim from the start of the reel.
 *  @param reel_trim_end Frames to trim from the end of the reel.
 *  @param from Time of the asset within the DCP.
 *  @param ffr Film (DCP) video frame rate.
 */
static void
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
{
	DCPOMATIC_ASSERT (r);
	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
	if (r->actual_duration() > 0) {
		a.push_back (
			ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
			);
	}
}
504
505
/** @return Reel assets from any DCP content which is marked to be referenced
 *  (re-used) rather than re-encoded, each with the period it covers in the
 *  new DCP, trimmed according to the content's trim settings.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	for (auto i: playlist()->content()) {
		auto j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder(_film, j, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* NOTE(review): a failure here abandons the whole scan and returns
			   whatever was gathered so far -- confirm this is intended.
			*/
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		for (auto k: decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		for (auto k: decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::CLOSED_CAPTION)) {
				for (auto l: k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
576
577
/** Run one step of playback: pick whichever source (content, black gap or
 *  silent gap) is farthest behind, make it emit some data, then push out any
 *  audio which is now complete.
 *  @return true when the whole playback length has been emitted.
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	for (auto i: _pieces) {
		auto time = i->decoder_before(_film, earliest_time);
		if (time) {
			earliest_time = *time;
			earliest_content = i;
		}
	}

	bool done = false;

	/* What kind of source should emit next */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		earliest_content->pass();
		if (!_play_referenced && earliest_content->reference_dcp_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = earliest_content->end (_film);
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence in one pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	auto pull_to = _playback_length;
	for (auto i: _pieces) {
		i->update_pull_to (pull_to);
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	auto audio = _audio_merger.pull (pull_to);
	for (auto i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			auto cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Everything has been passed; flush the 3D shuffler and any delayed video */
		_shuffler->flush ();
		for (auto const& i: _delay) {
			do_emit_video(i.first, i.second);
		}
	}

	return done;
}
720
721
722 /** @return Open subtitles for the frame at the given time, converted to images */
723 optional<PositionImage>
724 Player::open_subtitles_for_frame (DCPTime time) const
725 {
726         list<PositionImage> captions;
727         int const vfr = _film->video_frame_rate();
728
729         for (
730                 auto j:
731                 _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
732                 ) {
733
734                 /* Bitmap subtitles */
735                 for (auto i: j.bitmap) {
736                         if (!i.image) {
737                                 continue;
738                         }
739
740                         /* i.image will already have been scaled to fit _video_container_size */
741                         dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
742
743                         captions.push_back (
744                                 PositionImage (
745                                         i.image,
746                                         Position<int> (
747                                                 lrint(_video_container_size.width * i.rectangle.x),
748                                                 lrint(_video_container_size.height * i.rectangle.y)
749                                                 )
750                                         )
751                                 );
752                 }
753
754                 /* String subtitles (rendered to an image) */
755                 if (!j.string.empty()) {
756                         auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
757                         copy (s.begin(), s.end(), back_inserter (captions));
758                 }
759         }
760
761         if (captions.empty()) {
762                 return {};
763         }
764
765         return merge (captions);
766 }
767
768
769 void
770 Player::video (weak_ptr<Piece> wp, ContentVideo video)
771 {
772         auto piece = wp.lock ();
773         if (!piece) {
774                 return;
775         }
776
777         if (!piece->use_video()) {
778                 return;
779         }
780
781         auto frc = piece->frame_rate_change();
782         if (frc.skip && (video.frame % 2) == 1) {
783                 return;
784         }
785
786         /* Time of the first frame we will emit */
787         DCPTime const time = piece->content_video_to_dcp (video.frame);
788         LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
789
790         /* Discard if it's before the content's period or the last accurate seek.  We can't discard
791            if it's after the content's period here as in that case we still need to fill any gap between
792            `now' and the end of the content's period.
793         */
794         if (time < piece->position() || (_last_video_time && time < *_last_video_time)) {
795                 return;
796         }
797
798         if (piece->ignore_video && piece->ignore_video->contains(time)) {
799                 return;
800         }
801
802         /* Fill gaps that we discover now that we have some video which needs to be emitted.
803            This is where we need to fill to.
804         */
805         DCPTime fill_to = min (time, piece->end(_film));
806
807         if (_last_video_time) {
808                 DCPTime fill_from = max (*_last_video_time, piece->position());
809
810                 /* Fill if we have more than half a frame to do */
811                 if ((fill_to - fill_from) > one_video_frame() / 2) {
812                         auto last = _last_video.find (wp);
813                         if (_film->three_d()) {
814                                 auto fill_to_eyes = video.eyes;
815                                 if (fill_to_eyes == Eyes::BOTH) {
816                                         fill_to_eyes = Eyes::LEFT;
817                                 }
818                                 if (fill_to == piece->end(_film)) {
819                                         /* Don't fill after the end of the content */
820                                         fill_to_eyes = Eyes::LEFT;
821                                 }
822                                 auto j = fill_from;
823                                 auto eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
824                                 if (eyes == Eyes::BOTH) {
825                                         eyes = Eyes::LEFT;
826                                 }
827                                 while (j < fill_to || eyes != fill_to_eyes) {
828                                         if (last != _last_video.end()) {
829                                                 LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
830                                                 auto copy = last->second->shallow_copy();
831                                                 copy->set_eyes (eyes);
832                                                 emit_video (copy, j);
833                                         } else {
834                                                 LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
835                                                 emit_video (black_player_video_frame(eyes), j);
836                                         }
837                                         if (eyes == Eyes::RIGHT) {
838                                                 j += one_video_frame();
839                                         }
840                                         eyes = increment_eyes (eyes);
841                                 }
842                         } else {
843                                 for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
844                                         if (last != _last_video.end()) {
845                                                 emit_video (last->second, j);
846                                         } else {
847                                                 emit_video (black_player_video_frame(Eyes::BOTH), j);
848                                         }
849                                 }
850                         }
851                 }
852         }
853
854         _last_video[wp] = piece->player_video (video, _film, _video_container_size);
855
856         DCPTime t = time;
857         for (int i = 0; i < frc.repeat; ++i) {
858                 if (t < piece->end(_film)) {
859                         emit_video (_last_video[wp], t);
860                 }
861                 t += one_video_frame ();
862         }
863 }
864
865
/** Handle a block of audio arriving from a decoder: trim it to the content's
 *  DCP period, apply the content's gain, remap it into the film's channel
 *  layout, run any audio processor and push the result into the merger.
 *  @param wp Piece the audio came from.
 *  @param stream Stream within the piece that produced this audio.
 *  @param content_audio Audio data plus its frame index within the content.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	auto piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away; nothing to do */
		return;
	}

	/* Frame rate that this content's audio will have after resampling */
	int const rfr = piece->resampled_audio_frame_rate (_film);

	/* Compute time in the DCP */
	auto time = piece->resampled_audio_to_dcp (content_audio.frame, _film);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->position()) {
		auto cut = discard_audio (content_audio.audio, time, piece->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		/* Keep only the part after the content's start, and move `time' up to match */
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->end(_film)) {
		/* The block straddles the end of the content: truncate it */
		Frame const remaining_frames = DCPTime(piece->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (piece->audio_gain() != 0) {
		/* Copy so that we don't mutate the decoder's buffers */
		auto gain = make_shared<AudioBuffers>(content_audio.audio);
		gain->apply_gain (piece->audio_gain());
		content_audio.audio = gain;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	/* Record how far this stream has got so that silence-filling knows where the gaps are */
	piece->set_last_push_end (stream, time + DCPTime::from_frames(content_audio.audio->frames(), _film->audio_frame_rate()));
}
930
931
932 void
933 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentBitmapText subtitle)
934 {
935         auto piece = wp.lock ();
936         auto content = wc.lock ();
937         auto text = wt.lock ();
938         if (!piece || !content || !text) {
939                 return;
940         }
941
942         /* Apply content's subtitle offsets */
943         subtitle.sub.rectangle.x += text->x_offset ();
944         subtitle.sub.rectangle.y += text->y_offset ();
945
946         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
947         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
948         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
949
950         /* Apply content's subtitle scale */
951         subtitle.sub.rectangle.width *= text->x_scale ();
952         subtitle.sub.rectangle.height *= text->y_scale ();
953
954         PlayerText ps;
955         auto image = subtitle.sub.image;
956
957         /* We will scale the subtitle up to fit _video_container_size */
958         int const width = subtitle.sub.rectangle.width * _video_container_size.width;
959         int const height = subtitle.sub.rectangle.height * _video_container_size.height;
960         if (width == 0 || height == 0) {
961                 return;
962         }
963
964         dcp::Size scaled_size (width, height);
965         ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
966         auto from = piece->content_time_to_dcp(content, subtitle.from());
967         DCPOMATIC_ASSERT (from);
968
969         _active_texts[static_cast<int>(text->type())].add_from (wt, ps, *from);
970 }
971
972
/** Handle the start of a string (plain-text) subtitle: apply the content's
 *  position offsets and scales to each line and record the result as active.
 */
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentStringText subtitle)
{
	auto piece = wp.lock ();
	auto content = wc.lock ();
	auto text = wt.lock ();
	if (!piece || !content || !text) {
		return;
	}

	PlayerText ps;
	auto const from = piece->content_time_to_dcp(content, subtitle.from());
	DCPOMATIC_ASSERT (from);

	/* Ignore subtitles that start after the content's period ends */
	if (from > piece->end(_film)) {
		return;
	}

	for (auto s: subtitle.subs) {
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   1 / min(1/xs, 1/ys) is max(xs, ys), so e.g. if xs = ys = 0.5 we
		   scale size by 0.5.  The residual difference between xs and ys is
		   expressed as an aspect adjustment below.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* Subtitle times are stored relative to their DCP position */
		s.set_in (dcp::Time(from->seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[static_cast<int>(text->type())].add_from (wt, ps, *from);
}
1018
1019
1020 void
1021 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentTime to)
1022 {
1023         auto content = wc.lock ();
1024         auto text = wt.lock ();
1025         if (!text) {
1026                 return;
1027         }
1028
1029         if (!_active_texts[static_cast<int>(text->type())].have(wt)) {
1030                 return;
1031         }
1032
1033         shared_ptr<Piece> piece = wp.lock ();
1034         if (!piece) {
1035                 return;
1036         }
1037
1038         auto const dcp_to = piece->content_time_to_dcp(content, to);
1039         DCPOMATIC_ASSERT (dcp_to);
1040
1041         if (*dcp_to > piece->end(_film)) {
1042                 return;
1043         }
1044
1045         auto from = _active_texts[static_cast<int>(text->type())].add_to(wt, *dcp_to);
1046
1047         bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1048         if (text->use() && !always && !text->burn()) {
1049                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, *dcp_to));
1050         }
1051 }
1052
1053
/** Seek the player to a new position.
 *  @param time Position to seek to.
 *  @param accurate true to seek exactly to `time' (decoders will discard
 *  material before it); false to seek roughly (e.g. to the nearest
 *  keyframe), which is faster but may deliver content from before `time'.
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	/* Discard any buffered state from before the seek */

	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
		_active_texts[i].clear ();
	}

	/* Tell each piece's decoder to seek */
	for (auto i: _pieces) {
		i->seek (_film, time, accurate);
	}

	if (accurate) {
		/* We know exactly where the next output will come from */
		_last_video_time = time;
		_last_video_eyes = Eyes::LEFT;
		_last_audio_time = time;
	} else {
		/* We don't know where the decoders will resume, so clear our idea
		   of the last emission times.
		*/
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	_last_video.clear ();
}
1099
1100
1101 void
1102 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1103 {
1104         if (!_film->three_d()) {
1105                 if (pv->eyes() == Eyes::LEFT) {
1106                         /* Use left-eye images for both eyes... */
1107                         pv->set_eyes (Eyes::BOTH);
1108                 } else if (pv->eyes() == Eyes::RIGHT) {
1109                         /* ...and discard the right */
1110                         return;
1111                 }
1112         }
1113
1114         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1115            player before the video that requires them.
1116         */
1117         _delay.push_back (make_pair (pv, time));
1118
1119         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1120                 _last_video_time = time + one_video_frame();
1121         }
1122         _last_video_eyes = increment_eyes (pv->eyes());
1123
1124         if (_delay.size() < 3) {
1125                 return;
1126         }
1127
1128         auto to_do = _delay.front();
1129         _delay.pop_front();
1130         do_emit_video (to_do.first, to_do.second);
1131 }
1132
1133
1134 void
1135 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1136 {
1137         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1138                 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1139                         _active_texts[i].clear_before (time);
1140                 }
1141         }
1142
1143         auto subtitles = open_subtitles_for_frame (time);
1144         if (subtitles) {
1145                 pv->set_text (subtitles.get ());
1146         }
1147
1148         Video (pv, time);
1149 }
1150
1151
1152 void
1153 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1154 {
1155         /* Log if the assert below is about to fail */
1156         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1157                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1158         }
1159
1160         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1161         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1162         Audio (data, time, _film->audio_frame_rate());
1163         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1164 }
1165
1166
1167 void
1168 Player::fill_audio (DCPTimePeriod period)
1169 {
1170         if (period.from == period.to) {
1171                 return;
1172         }
1173
1174         DCPOMATIC_ASSERT (period.from < period.to);
1175
1176         DCPTime t = period.from;
1177         while (t < period.to) {
1178                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1179                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1180                 if (samples) {
1181                         auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1182                         silence->make_silent ();
1183                         emit_audio (silence, t);
1184                 }
1185                 t += block;
1186         }
1187 }
1188
1189
1190 DCPTime
1191 Player::one_video_frame () const
1192 {
1193         return DCPTime::from_frames (1, _film->video_frame_rate ());
1194 }
1195
1196
1197 pair<shared_ptr<AudioBuffers>, DCPTime>
1198 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1199 {
1200         auto const discard_time = discard_to - time;
1201         auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1202         auto remaining_frames = audio->frames() - discard_frames;
1203         if (remaining_frames <= 0) {
1204                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1205         }
1206         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1207         return make_pair(cut, time + discard_time);
1208 }
1209
1210
1211 void
1212 Player::set_dcp_decode_reduction (optional<int> reduction)
1213 {
1214         Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1215
1216         {
1217                 boost::mutex::scoped_lock lm (_mutex);
1218
1219                 if (reduction == _dcp_decode_reduction) {
1220                         lm.unlock ();
1221                         Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1222                         return;
1223                 }
1224
1225                 _dcp_decode_reduction = reduction;
1226                 setup_pieces_unlocked ();
1227         }
1228
1229         Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1230 }
1231
1232
1233 shared_ptr<const Playlist>
1234 Player::playlist () const
1235 {
1236         return _playlist ? _playlist : _film->playlist();
1237 }
1238
1239
1240 void
1241 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1242 {
1243         Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
1244 }
1245