Add Piece::position().
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "player.h"
24 #include "film.h"
25 #include "audio_buffers.h"
26 #include "content_audio.h"
27 #include "dcp_content.h"
28 #include "dcpomatic_log.h"
29 #include "job.h"
30 #include "image.h"
31 #include "raw_image_proxy.h"
32 #include "ratio.h"
33 #include "log.h"
34 #include "render_text.h"
35 #include "config.h"
36 #include "content_video.h"
37 #include "player_video.h"
38 #include "frame_rate_change.h"
39 #include "audio_processor.h"
40 #include "playlist.h"
41 #include "referenced_reel_asset.h"
42 #include "decoder_factory.h"
43 #include "decoder.h"
44 #include "video_decoder.h"
45 #include "audio_decoder.h"
46 #include "text_content.h"
47 #include "text_decoder.h"
48 #include "ffmpeg_content.h"
49 #include "audio_content.h"
50 #include "dcp_decoder.h"
51 #include "image_decoder.h"
52 #include "compose.hpp"
53 #include "shuffler.h"
54 #include "timer.h"
55 #include <dcp/reel.h>
56 #include <dcp/reel_sound_asset.h>
57 #include <dcp/reel_subtitle_asset.h>
58 #include <dcp/reel_picture_asset.h>
59 #include <dcp/reel_closed_caption_asset.h>
60 #include <stdint.h>
61 #include <algorithm>
62 #include <iostream>
63
64 #include "i18n.h"
65
66
67 using std::copy;
68 using std::cout;
69 using std::dynamic_pointer_cast;
70 using std::list;
71 using std::make_pair;
72 using std::make_shared;
73 using std::map;
74 using std::max;
75 using std::min;
76 using std::min;
77 using std::pair;
78 using std::shared_ptr;
79 using std::vector;
80 using std::weak_ptr;
81 using std::make_shared;
82 using boost::optional;
83 using boost::scoped_ptr;
84 #if BOOST_VERSION >= 106100
85 using namespace boost::placeholders;
86 #endif
87 using namespace dcpomatic;
88
89
/* Property identifiers passed with the Change signals that Player emits.
   Presumably numbered from 700 to keep them distinct from other property
   ID ranges used elsewhere — TODO confirm.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
96
97
/** Construct a Player which plays the whole of the given film's own playlist.
 *  @param film Film to play.
 */
Player::Player (shared_ptr<const Film> film)
	: _film (film)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
106
107
/** Construct a Player which plays an explicit playlist rather than the film's own.
 *  @param film Film whose settings (frame rate, container etc.) are used.
 *  @param playlist_ Playlist to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
117
118
/** Shared body of the two constructors: connect to film/playlist change
 *  signals, size the video container, build pieces and seek to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	/* Note: _2 (the content) is deliberately dropped; only type, property and `frequent' are forwarded */
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor, if any */
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}
135
136
Player::~Player ()
{
	/* _shuffler is a raw owning pointer, recreated each time in setup_pieces_unlocked() */
	delete _shuffler;
}
141
142
/** Take _mutex and rebuild the pieces; see setup_pieces_unlocked() for the work itself */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
149
150
151 bool
152 have_video (shared_ptr<const Content> content)
153 {
154         return static_cast<bool>(content->video) && content->video->use();
155 }
156
157
158 bool
159 have_audio (shared_ptr<const Content> content)
160 {
161         return static_cast<bool>(content->audio);
162 }
163
164
165 void
166 Player::setup_pieces_unlocked ()
167 {
168         _playback_length = _playlist ? _playlist->length(_film) : _film->length();
169
170         auto old_pieces = _pieces;
171         _pieces.clear ();
172
173         delete _shuffler;
174         _shuffler = new Shuffler();
175         _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
176
177         for (auto i: playlist()->content()) {
178
179                 if (!i->paths_valid ()) {
180                         continue;
181                 }
182
183                 if (_ignore_video && _ignore_audio && i->text.empty()) {
184                         /* We're only interested in text and this content has none */
185                         continue;
186                 }
187
188                 shared_ptr<Decoder> old_decoder;
189                 for (auto j: old_pieces) {
190                         if (j->content == i) {
191                                 old_decoder = j->decoder;
192                                 break;
193                         }
194                 }
195
196                 auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
197                 DCPOMATIC_ASSERT (decoder);
198
199                 FrameRateChange frc (_film, i);
200
201                 if (decoder->video && _ignore_video) {
202                         decoder->video->set_ignore (true);
203                 }
204
205                 if (decoder->audio && _ignore_audio) {
206                         decoder->audio->set_ignore (true);
207                 }
208
209                 if (_ignore_text) {
210                         for (auto i: decoder->text) {
211                                 i->set_ignore (true);
212                         }
213                 }
214
215                 auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
216                 if (dcp) {
217                         dcp->set_decode_referenced (_play_referenced);
218                         if (_play_referenced) {
219                                 dcp->set_forced_reduction (_dcp_decode_reduction);
220                         }
221                 }
222
223                 auto piece = make_shared<Piece>(i, decoder, frc);
224                 _pieces.push_back (piece);
225
226                 if (decoder->video) {
227                         if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
228                                 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
229                                 decoder->video->Data.connect (bind(&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
230                         } else {
231                                 decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
232                         }
233                 }
234
235                 if (decoder->audio) {
236                         decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
237                 }
238
239                 auto j = decoder->text.begin();
240
241                 while (j != decoder->text.end()) {
242                         (*j)->BitmapStart.connect (
243                                 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
244                                 );
245                         (*j)->PlainStart.connect (
246                                 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
247                                 );
248                         (*j)->Stop.connect (
249                                 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
250                                 );
251
252                         ++j;
253                 }
254
255                 if (decoder->atmos) {
256                         decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
257                 }
258         }
259
260         for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
261                 if ((*i)->use_video() && (*i)->content->video->frame_type() != VideoFrameType::THREE_D_LEFT && (*i)->content->video->frame_type() != VideoFrameType::THREE_D_RIGHT) {
262                         /* Look for content later in the content list with in-use video that overlaps this */
263                         auto period = DCPTimePeriod((*i)->position(), (*i)->content->end(_film));
264                         auto j = i;
265                         ++j;
266                         for (; j != _pieces.end(); ++j) {
267                                 if ((*j)->use_video()) {
268                                         (*i)->ignore_video = DCPTimePeriod((*j)->position(), (*j)->content->end(_film)).overlap(period);
269                                 }
270                         }
271                 }
272         }
273
274         _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
275         _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);
276
277         _last_video_time = boost::optional<dcpomatic::DCPTime>();
278         _last_video_eyes = Eyes::BOTH;
279         _last_audio_time = boost::optional<dcpomatic::DCPTime>();
280 }
281
282
/** Handle a change to some content in the playlist.
 *  @param type Stage of the change (PENDING, DONE or CANCELLED).
 *  @param property Identifier of the property that changed.
 *  @param frequent true if this change is likely to arrive many times in quick succession.
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (property == VideoContentProperty::CROP) {
		if (type == ChangeType::DONE) {
			/* A crop change does not need a full rebuild of the pieces; just refresh
			   the metadata of any video frames we are holding in _delay.
			*/
			auto const vcs = video_container_size();
			boost::mutex::scoped_lock lm (_mutex);
			for (auto const& i: _delay) {
				i.first->reset_metadata (_film, vcs);
			}
		}
	} else {
		if (type == ChangeType::PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces.  Stop pass()
			   and seek() from working until then.
			*/
			++_suspended;
		} else if (type == ChangeType::DONE) {
			/* A change in our content has gone through.  Re-build our pieces. */
			setup_pieces ();
			--_suspended;
		} else if (type == ChangeType::CANCELLED) {
			--_suspended;
		}
	}

	/* Forward the change to our own observers */
	Change (type, property, frequent);
}
312
313
/** Set the size of the container into which video will be scaled, emitting the
 *  usual PENDING/DONE (or CANCELLED, if the size is unchanged) Change signals.
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No change: unlock before signalling so observers may call back into us */
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Keep a black frame of the new size around for filling gaps */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
336
337
/** Handle a change to the playlist itself: rebuild the pieces once the change
 *  is DONE, then forward the change to our observers.
 */
void
Player::playlist_change (ChangeType type)
{
	if (type == ChangeType::DONE) {
		setup_pieces ();
	}
	Change (type, PlayerProperty::PLAYLIST, false);
}
346
347
348 void
349 Player::film_change (ChangeType type, Film::Property p)
350 {
351         /* Here we should notice Film properties that affect our output, and
352            alert listeners that our output now would be different to how it was
353            last time we were run.
354         */
355
356         if (p == Film::Property::CONTAINER) {
357                 Change (type, PlayerProperty::FILM_CONTAINER, false);
358         } else if (p == Film::Property::VIDEO_FRAME_RATE) {
359                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
360                    so we need new pieces here.
361                 */
362                 if (type == ChangeType::DONE) {
363                         setup_pieces ();
364                 }
365                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
366         } else if (p == Film::Property::AUDIO_PROCESSOR) {
367                 if (type == ChangeType::DONE && _film->audio_processor ()) {
368                         boost::mutex::scoped_lock lm (_mutex);
369                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
370                 }
371         } else if (p == Film::Property::AUDIO_CHANNELS) {
372                 if (type == ChangeType::DONE) {
373                         boost::mutex::scoped_lock lm (_mutex);
374                         _audio_merger.clear ();
375                 }
376         }
377 }
378
379
/** @return A black PlayerVideo frame, the size of the video container, for filling
 *  gaps where no content video exists.
 *  @param eyes Eyes that the frame should be marked as being for (3D).
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		Crop(),
		optional<double>(),
		_video_container_size,
		_video_container_size,
		eyes,
		Part::WHOLE,
		PresetColourConversion::all().front().conversion,
		VideoRange::FULL,
		std::weak_ptr<Content>(),
		boost::optional<Frame>(),
		false
	);
}
398
399
400 vector<FontData>
401 Player::get_subtitle_fonts ()
402 {
403         boost::mutex::scoped_lock lm (_mutex);
404
405         vector<FontData> fonts;
406         for (auto i: _pieces) {
407                 /* XXX: things may go wrong if there are duplicate font IDs
408                    with different font files.
409                 */
410                 auto f = i->decoder->fonts ();
411                 copy (f.begin(), f.end(), back_inserter(fonts));
412         }
413
414         return fonts;
415 }
416
417
418 /** Set this player never to produce any video data */
419 void
420 Player::set_ignore_video ()
421 {
422         boost::mutex::scoped_lock lm (_mutex);
423         _ignore_video = true;
424         setup_pieces_unlocked ();
425 }
426
427
428 void
429 Player::set_ignore_audio ()
430 {
431         boost::mutex::scoped_lock lm (_mutex);
432         _ignore_audio = true;
433         setup_pieces_unlocked ();
434 }
435
436
437 void
438 Player::set_ignore_text ()
439 {
440         boost::mutex::scoped_lock lm (_mutex);
441         _ignore_text = true;
442         setup_pieces_unlocked ();
443 }
444
445
446 /** Set the player to always burn open texts into the image regardless of the content settings */
447 void
448 Player::set_always_burn_open_subtitles ()
449 {
450         boost::mutex::scoped_lock lm (_mutex);
451         _always_burn_open_subtitles = true;
452 }
453
454
455 /** Sets up the player to be faster, possibly at the expense of quality */
456 void
457 Player::set_fast ()
458 {
459         boost::mutex::scoped_lock lm (_mutex);
460         _fast = true;
461         setup_pieces_unlocked ();
462 }
463
464
465 void
466 Player::set_play_referenced ()
467 {
468         boost::mutex::scoped_lock lm (_mutex);
469         _play_referenced = true;
470         setup_pieces_unlocked ();
471 }
472
473
474 static void
475 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
476 {
477         DCPOMATIC_ASSERT (r);
478         r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
479         r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
480         if (r->actual_duration() > 0) {
481                 a.push_back (
482                         ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
483                         );
484         }
485 }
486
487
/** @return reel assets from any DCP content in the playlist which is marked as
 *  referenced (i.e. to be used directly rather than re-encoded), each trimmed
 *  and placed at its DCP time.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	for (auto i: playlist()->content()) {
		auto j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder(_film, j, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* NOTE(review): any failure to open the DCP silently returns whatever
			   has been gathered so far — presumably deliberate best-effort; confirm.
			*/
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		for (auto k: decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		for (auto k: decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::CLOSED_CAPTION)) {
				for (auto l: k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
558
559
/** Emit some more data (video, audio, black or silence) from whichever source is
 *  furthest behind, then emit any audio which is known to be complete.
 *  @return true if playback has finished (or cannot proceed), false otherwise.
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	for (auto i: _pieces) {
		if (i->done) {
			continue;
		}

		/* The piece's current position, clamped so that trim_start is not decoded */
		auto const t = i->content_time_to_dcp (max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What kind of data we will produce on this pass */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Black filler beats content if its position is earlier */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	/* Likewise silence */
	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
		earliest_content->done = earliest_content->decoder->pass ();
		auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	auto pull_to = _playback_length;
	for (auto i: _pieces) {
		i->update_pull_to (pull_to);
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	auto audio = _audio_merger.pull (pull_to);
	for (auto i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			auto cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Playback has finished: flush anything held back by the shuffler or the delay queue */
		_shuffler->flush ();
		for (auto const& i: _delay) {
			do_emit_video(i.first, i.second);
		}
	}

	return done;
}
716
717
718 /** @return Open subtitles for the frame at the given time, converted to images */
719 optional<PositionImage>
720 Player::open_subtitles_for_frame (DCPTime time) const
721 {
722         list<PositionImage> captions;
723         int const vfr = _film->video_frame_rate();
724
725         for (
726                 auto j:
727                 _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
728                 ) {
729
730                 /* Bitmap subtitles */
731                 for (auto i: j.bitmap) {
732                         if (!i.image) {
733                                 continue;
734                         }
735
736                         /* i.image will already have been scaled to fit _video_container_size */
737                         dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
738
739                         captions.push_back (
740                                 PositionImage (
741                                         i.image,
742                                         Position<int> (
743                                                 lrint(_video_container_size.width * i.rectangle.x),
744                                                 lrint(_video_container_size.height * i.rectangle.y)
745                                                 )
746                                         )
747                                 );
748                 }
749
750                 /* String subtitles (rendered to an image) */
751                 if (!j.string.empty()) {
752                         auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
753                         copy (s.begin(), s.end(), back_inserter (captions));
754                 }
755         }
756
757         if (captions.empty()) {
758                 return {};
759         }
760
761         return merge (captions);
762 }
763
764
765 void
/* Handler for video arriving from a Piece's decoder.  Discards out-of-range
 * frames, fills any gap since the last emitted video (with a repeat of the
 * previous frame, or black), then emits this frame, repeating it as required
 * by the frame rate change.
 */
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	auto piece = wp.lock ();
	if (!piece) {
		/* The piece has gone (e.g. content removed); ignore this frame */
		return;
	}

	if (!piece->use_video()) {
		return;
	}

	auto frc = piece->frame_rate_change();
	if (frc.skip && (video.frame % 2) == 1) {
		/* Frame-rate conversion drops every other content frame; skip odd ones */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = piece->content_video_to_dcp (video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Period during which this piece's video is ignored — presumably when the
	   video is being referenced from an existing DCP; TODO confirm */
	if (piece->ignore_video && piece->ignore_video->contains(time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			auto last = _last_video.find (wp);
			if (_film->three_d()) {
				/* In 3D we fill eye-by-eye: work out which eye to stop filling at */
				auto fill_to_eyes = video.eyes;
				if (fill_to_eyes == Eyes::BOTH) {
					fill_to_eyes = Eyes::LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = Eyes::LEFT;
				}
				auto j = fill_from;
				auto eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
				if (eyes == Eyes::BOTH) {
					eyes = Eyes::LEFT;
				}
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						auto copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					/* Only advance the clock once both eyes have been emitted */
					if (eyes == Eyes::RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: fill one whole frame at a time */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(Eyes::BOTH), j);
					}
				}
			}
		}
	}

	/* Remember this frame so that it can be used to fill any future gap */
	_last_video[wp] = std::make_shared<PlayerVideo>(
		video.image,
		piece->content->video->crop (),
		piece->content->video->fade (_film, video.frame),
		scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
		_video_container_size,
		video.eyes,
		video.part,
		piece->content->video->colour_conversion(),
		piece->content->video->range(),
		piece->content,
		video.frame,
		false
		);

	/* Emit the frame, repeated as the frame rate change requires, but never
	   past the end of the content */
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
873
874
void
/* Handler for audio arriving from a Piece's decoder.  Trims the block to the
 * content's period, applies gain, remaps channels, runs any audio processor
 * and pushes the result into the merger.
 */
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	auto piece = wp.lock ();
	if (!piece) {
		/* The piece has gone (e.g. content removed); ignore this audio */
		return;
	}

	auto content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Frame rate of this content after resampling to suit the film */
	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	auto time = piece->resampled_audio_to_dcp (content_audio.frame, _film);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->position()) {
		auto cut = discard_audio (content_audio.audio, time, piece->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* The block straddles the end of the content: keep only the part before the end */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy before applying gain so we don't modify the decoder's buffers */
		auto gain = make_shared<AudioBuffers>(content_audio.audio);
		gain->apply_gain (content->gain());
		content_audio.audio = gain;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	/* Note where this stream's audio now ends, so pass() knows how far we have got */
	piece->set_last_push_end (stream, time + DCPTime::from_frames(content_audio.audio->frames(), _film->audio_frame_rate()));
}
942
943
944 void
945 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
946 {
947         auto piece = wp.lock ();
948         auto text = wc.lock ();
949         if (!piece || !text) {
950                 return;
951         }
952
953         /* Apply content's subtitle offsets */
954         subtitle.sub.rectangle.x += text->x_offset ();
955         subtitle.sub.rectangle.y += text->y_offset ();
956
957         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
958         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
959         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
960
961         /* Apply content's subtitle scale */
962         subtitle.sub.rectangle.width *= text->x_scale ();
963         subtitle.sub.rectangle.height *= text->y_scale ();
964
965         PlayerText ps;
966         auto image = subtitle.sub.image;
967
968         /* We will scale the subtitle up to fit _video_container_size */
969         int const width = subtitle.sub.rectangle.width * _video_container_size.width;
970         int const height = subtitle.sub.rectangle.height * _video_container_size.height;
971         if (width == 0 || height == 0) {
972                 return;
973         }
974
975         dcp::Size scaled_size (width, height);
976         ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
977         DCPTime from (piece->content_time_to_dcp(subtitle.from()));
978
979         _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
980 }
981
982
983 void
984 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
985 {
986         auto piece = wp.lock ();
987         auto text = wc.lock ();
988         if (!piece || !text) {
989                 return;
990         }
991
992         PlayerText ps;
993         DCPTime const from (piece->content_time_to_dcp(subtitle.from()));
994
995         if (from > piece->content->end(_film)) {
996                 return;
997         }
998
999         for (auto s: subtitle.subs) {
1000                 s.set_h_position (s.h_position() + text->x_offset ());
1001                 s.set_v_position (s.v_position() + text->y_offset ());
1002                 float const xs = text->x_scale();
1003                 float const ys = text->y_scale();
1004                 float size = s.size();
1005
1006                 /* Adjust size to express the common part of the scaling;
1007                    e.g. if xs = ys = 0.5 we scale size by 2.
1008                 */
1009                 if (xs > 1e-5 && ys > 1e-5) {
1010                         size *= 1 / min (1 / xs, 1 / ys);
1011                 }
1012                 s.set_size (size);
1013
1014                 /* Then express aspect ratio changes */
1015                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
1016                         s.set_aspect_adjust (xs / ys);
1017                 }
1018
1019                 s.set_in (dcp::Time(from.seconds(), 1000));
1020                 ps.string.push_back (StringText (s, text->outline_width()));
1021                 ps.add_fonts (text->fonts ());
1022         }
1023
1024         _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
1025 }
1026
1027
1028 void
1029 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1030 {
1031         auto text = wc.lock ();
1032         if (!text) {
1033                 return;
1034         }
1035
1036         if (!_active_texts[static_cast<int>(text->type())].have(wc)) {
1037                 return;
1038         }
1039
1040         shared_ptr<Piece> piece = wp.lock ();
1041         if (!piece) {
1042                 return;
1043         }
1044
1045         auto const dcp_to = piece->content_time_to_dcp(to);
1046
1047         if (dcp_to > piece->content->end(_film)) {
1048                 return;
1049         }
1050
1051         auto from = _active_texts[static_cast<int>(text->type())].add_to (wc, dcp_to);
1052
1053         bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1054         if (text->use() && !always && !text->burn()) {
1055                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1056         }
1057 }
1058
1059
1060 void
1061 Player::seek (DCPTime time, bool accurate)
1062 {
1063         boost::mutex::scoped_lock lm (_mutex);
1064         LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");
1065
1066         if (_suspended) {
1067                 /* We can't seek in this state */
1068                 return;
1069         }
1070
1071         if (_shuffler) {
1072                 _shuffler->clear ();
1073         }
1074
1075         _delay.clear ();
1076
1077         if (_audio_processor) {
1078                 _audio_processor->flush ();
1079         }
1080
1081         _audio_merger.clear ();
1082         for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1083                 _active_texts[i].clear ();
1084         }
1085
1086         for (auto i: _pieces) {
1087                 if (time < i->position()) {
1088                         /* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
1089                            we must seek this (following) content accurately, otherwise when we come to the end of the current
1090                            content we may not start right at the beginning of the next, causing a gap (if the next content has
1091                            been trimmed to a point between keyframes, or something).
1092                         */
1093                         i->decoder->seek (i->dcp_to_content_time(i->position(), _film), true);
1094                         i->done = false;
1095                 } else if (i->position() <= time && time < i->content->end(_film)) {
1096                         /* During; seek to position */
1097                         i->decoder->seek (i->dcp_to_content_time(time, _film), accurate);
1098                         i->done = false;
1099                 } else {
1100                         /* After; this piece is done */
1101                         i->done = true;
1102                 }
1103         }
1104
1105         if (accurate) {
1106                 _last_video_time = time;
1107                 _last_video_eyes = Eyes::LEFT;
1108                 _last_audio_time = time;
1109         } else {
1110                 _last_video_time = optional<DCPTime>();
1111                 _last_video_eyes = optional<Eyes>();
1112                 _last_audio_time = optional<DCPTime>();
1113         }
1114
1115         _black.set_position (time);
1116         _silent.set_position (time);
1117
1118         _last_video.clear ();
1119 }
1120
1121
1122 void
1123 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1124 {
1125         if (!_film->three_d()) {
1126                 if (pv->eyes() == Eyes::LEFT) {
1127                         /* Use left-eye images for both eyes... */
1128                         pv->set_eyes (Eyes::BOTH);
1129                 } else if (pv->eyes() == Eyes::RIGHT) {
1130                         /* ...and discard the right */
1131                         return;
1132                 }
1133         }
1134
1135         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1136            player before the video that requires them.
1137         */
1138         _delay.push_back (make_pair (pv, time));
1139
1140         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1141                 _last_video_time = time + one_video_frame();
1142         }
1143         _last_video_eyes = increment_eyes (pv->eyes());
1144
1145         if (_delay.size() < 3) {
1146                 return;
1147         }
1148
1149         auto to_do = _delay.front();
1150         _delay.pop_front();
1151         do_emit_video (to_do.first, to_do.second);
1152 }
1153
1154
1155 void
1156 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1157 {
1158         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1159                 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1160                         _active_texts[i].clear_before (time);
1161                 }
1162         }
1163
1164         auto subtitles = open_subtitles_for_frame (time);
1165         if (subtitles) {
1166                 pv->set_text (subtitles.get ());
1167         }
1168
1169         Video (pv, time);
1170 }
1171
1172
1173 void
1174 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1175 {
1176         /* Log if the assert below is about to fail */
1177         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1178                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1179         }
1180
1181         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1182         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1183         Audio (data, time, _film->audio_frame_rate());
1184         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1185 }
1186
1187
1188 void
1189 Player::fill_audio (DCPTimePeriod period)
1190 {
1191         if (period.from == period.to) {
1192                 return;
1193         }
1194
1195         DCPOMATIC_ASSERT (period.from < period.to);
1196
1197         DCPTime t = period.from;
1198         while (t < period.to) {
1199                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1200                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1201                 if (samples) {
1202                         auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1203                         silence->make_silent ();
1204                         emit_audio (silence, t);
1205                 }
1206                 t += block;
1207         }
1208 }
1209
1210
1211 DCPTime
1212 Player::one_video_frame () const
1213 {
1214         return DCPTime::from_frames (1, _film->video_frame_rate ());
1215 }
1216
1217
1218 pair<shared_ptr<AudioBuffers>, DCPTime>
1219 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1220 {
1221         auto const discard_time = discard_to - time;
1222         auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1223         auto remaining_frames = audio->frames() - discard_frames;
1224         if (remaining_frames <= 0) {
1225                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1226         }
1227         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1228         return make_pair(cut, time + discard_time);
1229 }
1230
1231
void
/* Set the decode-resolution reduction applied to DCP content and rebuild the
 * pieces, signalling the change via the Change signal.
 */
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change; unlock before signalling so that handlers can
			   call back into the Player without deadlocking */
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		setup_pieces_unlocked ();
	}

	/* Signal outside the lock, for the same reason as above */
	Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1252
1253
1254 optional<DCPTime>
1255 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1256 {
1257         boost::mutex::scoped_lock lm (_mutex);
1258
1259         for (auto i: _pieces) {
1260                 if (i->content == content) {
1261                         return i->content_time_to_dcp(t);
1262                 }
1263         }
1264
1265         /* We couldn't find this content; perhaps things are being changed over */
1266         return {};
1267 }
1268
1269
1270 shared_ptr<const Playlist>
1271 Player::playlist () const
1272 {
1273         return _playlist ? _playlist : _film->playlist();
1274 }
1275
1276
1277 void
1278 Player::atmos (weak_ptr<Piece>, ContentAtmos data)
1279 {
1280         Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
1281 }
1282