Add collect() for Player.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21
22 #include "atmos_decoder.h"
23 #include "audio_buffers.h"
24 #include "audio_content.h"
25 #include "audio_decoder.h"
26 #include "audio_processor.h"
27 #include "compose.hpp"
28 #include "config.h"
29 #include "dcp_content.h"
30 #include "dcp_decoder.h"
31 #include "dcpomatic_log.h"
32 #include "decoder.h"
33 #include "decoder_factory.h"
34 #include "ffmpeg_content.h"
35 #include "film.h"
36 #include "frame_rate_change.h"
37 #include "image.h"
38 #include "image_decoder.h"
39 #include "job.h"
40 #include "log.h"
41 #include "piece_video.h"
42 #include "player.h"
43 #include "player_video.h"
44 #include "playlist.h"
45 #include "ratio.h"
46 #include "raw_image_proxy.h"
47 #include "referenced_reel_asset.h"
48 #include "render_text.h"
49 #include "shuffler.h"
50 #include "text_content.h"
51 #include "text_decoder.h"
52 #include "timer.h"
53 #include "video_decoder.h"
54 #include <dcp/reel.h>
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <stdint.h>
60 #include <algorithm>
61 #include <iostream>
62
63 #include "i18n.h"
64
65
66 using std::copy;
67 using std::cout;
68 using std::dynamic_pointer_cast;
69 using std::list;
70 using std::make_pair;
71 using std::make_shared;
72 using std::make_shared;
73 using std::map;
74 using std::max;
75 using std::min;
76 using std::min;
77 using std::pair;
78 using std::shared_ptr;
79 using std::vector;
80 using std::weak_ptr;
81 using std::unique_ptr;
82 using boost::optional;
83 #if BOOST_VERSION >= 106100
84 using namespace boost::placeholders;
85 #endif
86 using namespace dcpomatic;
87
88
/* Identifiers for the player's own properties, announced via the Change signal
   (alongside content/film property identifiers, hence the distinct 7xx range).
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
95
96
/** Maximum difference between two gain values (or gains within a mapping) for
 *  them to be treated as equal when grouping audio content in collect().
 *  About 0.01dB.  constexpr rather than a macro so it is typed and scoped.
 */
constexpr double AUDIO_GAIN_EPSILON = 0.001;
99
100
/** Construct a Player for the whole of @p film, using the playlist that
 *  playlist() supplies when no explicit one is given.
 */
Player::Player (shared_ptr<const Film> film)
	: _film (film)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
109
110
/** Construct a Player which plays an explicit @p playlist_ in the context of @p film
 *  (rather than whatever playlist() would otherwise supply).
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _tolerant (film->tolerant())
	, _audio_merger (_film->audio_frame_rate())
{
	construct ();
}
120
121
/** Shared set-up for both constructors: connect to film/playlist change signals,
 *  build the initial pieces and seek to the start.
 */
void
Player::construct ()
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor (if any) before the first setup_pieces() */
	film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

	setup_pieces ();
	/* Seek to the start; second argument presumably requests an accurate seek — see Player::seek */
	seek (DCPTime (), true);
}
138
139
/** Take _mutex and rebuild the pieces from the playlist */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
146
147
148 bool
149 have_video (shared_ptr<const Content> content)
150 {
151         return static_cast<bool>(content->video) && content->video->use();
152 }
153
154
155 bool
156 have_audio (shared_ptr<const Content> content)
157 {
158         return static_cast<bool>(content->audio);
159 }
160
161
162 vector<vector<shared_ptr<Content>>>
163 collect (shared_ptr<const Film> film, ContentList content)
164 {
165         vector<shared_ptr<Content>> ungrouped;
166         vector<vector<shared_ptr<Content>>> grouped;
167
168         auto same_settings = [](shared_ptr<const Film> film, shared_ptr<const AudioContent> a, shared_ptr<const AudioContent> b) {
169
170                 auto a_streams = a->streams();
171                 auto b_streams = b->streams();
172
173                 if (a_streams.size() != b_streams.size()) {
174                         return false;
175                 }
176
177                 for (size_t i = 0; i < a_streams.size(); ++i) {
178                         auto a_stream = a_streams[i];
179                         auto b_stream = b_streams[i];
180                         if (
181                                 !a_stream->mapping().equals(b_stream->mapping(), AUDIO_GAIN_EPSILON) ||
182                                 a_stream->frame_rate() != b_stream->frame_rate() ||
183                                 a_stream->channels() != b_stream->channels()) {
184                                 return false;
185                         }
186                 }
187
188                 return (
189                         fabs(a->gain() - b->gain()) < AUDIO_GAIN_EPSILON &&
190                         a->delay() == b->delay() &&
191                         a->language() == b->language() &&
192                         a->resampled_frame_rate(film) == b->resampled_frame_rate(film) &&
193                         a->channel_names() == b->channel_names()
194                        );
195         };
196
197         for (auto i: content) {
198                 if (i->video || !i->audio || !i->text.empty()) {
199                         ungrouped.push_back (i);
200                 } else {
201                         bool done = false;
202                         for (auto& g: grouped) {
203                                 if (same_settings(film, g.front()->audio, i->audio) && i->position() == g.back()->end(film)) {
204                                         g.push_back (i);
205                                         done = true;
206                                 }
207                         }
208                         if (!done) {
209                                 grouped.push_back ({i});
210                         }
211                 }
212         }
213
214         for (auto i: ungrouped) {
215                 grouped.push_back({i});
216         }
217
218         return grouped;
219 }
220
221
/** Rebuild _pieces (and the black/silence gap trackers) from the current
 *  playlist content.  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so their decoders can be offered for re-use below */
	auto old_pieces = _pieces;
	_pieces.clear ();

	_shuffler.reset (new Shuffler());
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	for (auto i: playlist()->content()) {

		if (!i->paths_valid ()) {
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Find any existing decoder for this content so that decoder_factory can re-use it */
		shared_ptr<Decoder> old_decoder;
		for (auto j: old_pieces) {
			auto decoder = j->decoder_for(i);
			if (decoder) {
				old_decoder = decoder;
				break;
			}
		}

		auto decoder = decoder_factory (_film, i, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		/* Tell the decoder's sub-decoders about any streams we have been told to ignore */
		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			for (auto i: decoder->text) {
				i->set_ignore (true);
			}
		}

		auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		auto piece = make_shared<Piece>(_film, i, decoder, frc, _fast);
		_pieces.push_back (piece);

		if (i->video) {
			if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				piece->Video.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
			} else {
				piece->Video.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (i->audio) {
			piece->Audio.connect (bind(&Player::audio, this, weak_ptr<Piece>(piece), _1));
		}

		/* Route text (subtitle/caption) events from each text decoder to our handlers */
		auto j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
		if ((*i)->use_video() && (*i)->video_frame_type() != VideoFrameType::THREE_D_LEFT && (*i)->video_frame_type() != VideoFrameType::THREE_D_RIGHT) {
			/* Look for content later in the content list with in-use video that overlaps this */
			for (auto j = std::next(i); j != _pieces.end(); ++j) {
				if ((*j)->use_video()) {
					(*i)->set_ignore_video ((*j)->period().overlap((*i)->period()));
				}
			}
		}
	}

	/* _black and _silent track the periods with no video / no audio, which pass() must fill itself */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	/* Reset the last-emitted positions ready for playback from the start */
	_last_video_time = boost::optional<dcpomatic::DCPTime>();
	_last_video_eyes = Eyes::BOTH;
	_last_audio_time = boost::optional<dcpomatic::DCPTime>();
}
335
336
337 optional<DCPTime>
338 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
339 {
340         boost::mutex::scoped_lock lm (_mutex);
341
342         for (auto i: _pieces) {
343                 auto dcp = i->content_time_to_dcp(content, t);
344                 if (dcp) {
345                         return *dcp;
346                 }
347         }
348
349         /* We couldn't find this content; perhaps things are being changed over */
350         return {};
351 }
352
353
/** Handler for a change in some property of content in our playlist.
 *  @param type PENDING, DONE or CANCELLED.
 *  @param property The property that changed.
 *  @param frequent true if this change is expected to happen often (passed on via Change).
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (property == VideoContentProperty::CROP) {
		if (type == ChangeType::DONE) {
			auto const vcs = video_container_size();
			boost::mutex::scoped_lock lm (_mutex);
			/* A crop change only needs the metadata of any queued frames updating,
			   not a full rebuild of the pieces.
			*/
			for (auto const& i: _delay) {
				i.first->reset_metadata (_film, vcs);
			}
		}
	} else {
		if (type == ChangeType::PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces.  Stop pass()
			   and seek() from working until then.
			*/
			++_suspended;
		} else if (type == ChangeType::DONE) {
			/* A change in our content has gone through.  Re-build our pieces. */
			setup_pieces ();
			--_suspended;
		} else if (type == ChangeType::CANCELLED) {
			--_suspended;
		}
	}

	/* Proxy the change on to our own listeners (e.g. the butler; see construct()) */
	Change (type, property, frequent);
}
383
384
/** Set the size of the container into which video will be placed, rebuilding
 *  the cached black frame to match.  Emits PENDING then DONE (or CANCELLED if
 *  the size is unchanged).
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No change: drop the lock before emitting so that handlers may call back into us */
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Re-make the black frame used to fill video gaps at the new size */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	/* Lock has been released by the scope above before we emit */
	Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
407
408
409 void
410 Player::playlist_change (ChangeType type)
411 {
412         if (type == ChangeType::DONE) {
413                 setup_pieces ();
414         }
415         Change (type, PlayerProperty::PLAYLIST, false);
416 }
417
418
419 void
420 Player::film_change (ChangeType type, Film::Property p)
421 {
422         /* Here we should notice Film properties that affect our output, and
423            alert listeners that our output now would be different to how it was
424            last time we were run.
425         */
426
427         if (p == Film::Property::CONTAINER) {
428                 Change (type, PlayerProperty::FILM_CONTAINER, false);
429         } else if (p == Film::Property::VIDEO_FRAME_RATE) {
430                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
431                    so we need new pieces here.
432                 */
433                 if (type == ChangeType::DONE) {
434                         setup_pieces ();
435                 }
436                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
437         } else if (p == Film::Property::AUDIO_PROCESSOR) {
438                 if (type == ChangeType::DONE && _film->audio_processor ()) {
439                         boost::mutex::scoped_lock lm (_mutex);
440                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
441                 }
442         } else if (p == Film::Property::AUDIO_CHANNELS) {
443                 if (type == ChangeType::DONE) {
444                         boost::mutex::scoped_lock lm (_mutex);
445                         _audio_merger.clear ();
446                 }
447         }
448 }
449
450
/** @return a PlayerVideo of our cached black frame, filling the whole
 *  container, for the given @p eyes.
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return std::make_shared<PlayerVideo> (
		std::make_shared<const RawImageProxy>(_black_image),
		Crop(),
		optional<double>(),
		_video_container_size,
		_video_container_size,
		eyes,
		Part::WHOLE,
		PresetColourConversion::all().front().conversion,
		VideoRange::FULL,
		std::weak_ptr<Content>(),
		boost::optional<Frame>(),
		false
	);
}
469
470
471 vector<FontData>
472 Player::get_subtitle_fonts ()
473 {
474         boost::mutex::scoped_lock lm (_mutex);
475
476         vector<FontData> fonts;
477         for (auto i: _pieces) {
478                 /* XXX: things may go wrong if there are duplicate font IDs
479                    with different font files.
480                 */
481                 auto f = i->fonts ();
482                 copy (f.begin(), f.end(), back_inserter(fonts));
483         }
484
485         return fonts;
486 }
487
488
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_video = true;
	/* Rebuild pieces so the flag is applied to the decoders (see setup_pieces_unlocked) */
	setup_pieces_unlocked ();
}
497
498
/** Set this player never to produce any audio data */
void
Player::set_ignore_audio ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_audio = true;
	/* Rebuild pieces so the flag is applied to the decoders (see setup_pieces_unlocked) */
	setup_pieces_unlocked ();
}
506
507
/** Set this player never to produce any text (subtitle/caption) data */
void
Player::set_ignore_text ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_text = true;
	/* Rebuild pieces so the flag is applied to the decoders (see setup_pieces_unlocked) */
	setup_pieces_unlocked ();
}
515
516
/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
	boost::mutex::scoped_lock lm (_mutex);
	/* No piece rebuild is needed: this flag is read when deciding which
	   subtitles to burn (see open_subtitles_for_frame).
	*/
	_always_burn_open_subtitles = true;
}
524
525
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_fast = true;
	/* _fast is passed to each Piece on construction, so rebuild them */
	setup_pieces_unlocked ();
}
534
535
/** Set the player to decode content from referenced DCPs rather than skipping it */
void
Player::set_play_referenced ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	/* Rebuild pieces so DCP decoders pick up set_decode_referenced() */
	setup_pieces_unlocked ();
}
543
544
545 static void
546 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
547 {
548         DCPOMATIC_ASSERT (r);
549         r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
550         r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
551         if (r->actual_duration() > 0) {
552                 a.push_back (
553                         ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
554                         );
555         }
556 }
557
558
/** @return reel assets from any DCP content which is marked for reference
 *  (rather than re-encoding), with entry points and durations adjusted for
 *  the content's trim settings.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	for (auto i: playlist()->content()) {
		/* Only DCP content can be referenced */
		auto j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			continue;
		}

		unique_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder(_film, j, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* This DCP could not be decoded; give up and return what we have so far */
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		/* Trims expressed as frame counts at the content frame rate */
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		for (auto k: decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		for (auto k: decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			auto const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TextType::CLOSED_CAPTION)) {
				for (auto l: k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
629
630
/** Ask whichever of our decoders (or gap-fillers) is farthest behind to emit
 *  some more data, then push out any audio which is now complete.
 *  @return true if playback has finished (there is nothing left to emit).
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	for (auto i: _pieces) {
		auto time = i->decoder_before(earliest_time);
		if (time) {
			earliest_time = *time;
			earliest_content = i;
		}
	}

	bool done = false;

	/* What we will do on this pass: emit from a piece, fill a video gap with
	   black, fill an audio gap with silence, or nothing (i.e. we are done).
	*/
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* A pending black (video gap) or silence (audio gap) earlier than any content takes priority */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		earliest_content->pass();
		if (!_play_referenced && earliest_content->reference_dcp_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = earliest_content->end ();
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	auto pull_to = _playback_length;
	for (auto i: _pieces) {
		i->update_pull_to (pull_to);
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	auto audio = _audio_merger.pull (pull_to);
	for (auto i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			auto cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Playback has finished: flush anything still held in the shuffler and the delay queue */
		_shuffler->flush ();
		for (auto const& i: _delay) {
			do_emit_video(i.first, i.second);
		}
	}

	return done;
}
773
774
775 /** @return Open subtitles for the frame at the given time, converted to images */
776 optional<PositionImage>
777 Player::open_subtitles_for_frame (DCPTime time) const
778 {
779         list<PositionImage> captions;
780         int const vfr = _film->video_frame_rate();
781
782         for (
783                 auto j:
784                 _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
785                 ) {
786
787                 /* Bitmap subtitles */
788                 for (auto i: j.bitmap) {
789                         if (!i.image) {
790                                 continue;
791                         }
792
793                         /* i.image will already have been scaled to fit _video_container_size */
794                         dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
795
796                         captions.push_back (
797                                 PositionImage (
798                                         i.image,
799                                         Position<int> (
800                                                 lrint(_video_container_size.width * i.rectangle.x),
801                                                 lrint(_video_container_size.height * i.rectangle.y)
802                                                 )
803                                         )
804                                 );
805                 }
806
807                 /* String subtitles (rendered to an image) */
808                 if (!j.string.empty()) {
809                         auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
810                         copy (s.begin(), s.end(), back_inserter (captions));
811                 }
812         }
813
814         if (captions.empty()) {
815                 return {};
816         }
817
818         return merge (captions);
819 }
820
821
void
Player::video (weak_ptr<Piece> wp, PieceVideo video)
{
	/* Handle some video (a frame, or one eye of a frame in 3D) arriving from a
	   Piece's decoder: convert its frame number to DCP time, discard it if it
	   arrives too early, fill any gap between the last video we emitted and this
	   frame (repeating the previous frame, or emitting black), then emit it,
	   repeating it if the frame rate change requires.
	*/
	auto piece = wp.lock ();
	if (!piece) {
		return;
	}

	if (!piece->use_video()) {
		return;
	}

	auto frc = piece->frame_rate_change();
	if (frc.skip && (video.frame % 2) == 1) {
		/* This frame rate change drops every other frame */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = piece->content_video_to_dcp (video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	if (piece->ignore_video_at(time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->end());

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* Previously-emitted frame for this piece, if any, to repeat as filler */
			auto last = _last_video.find (wp);
			if (_film->three_d()) {
				/* In 3D we must fill eye-by-eye so that left/right alternation
				   is preserved.  First decide which eye to stop at...
				*/
				auto fill_to_eyes = video.eyes;
				if (fill_to_eyes == Eyes::BOTH) {
					fill_to_eyes = Eyes::LEFT;
				}
				if (fill_to == piece->end()) {
					/* Don't fill after the end of the content */
					fill_to_eyes = Eyes::LEFT;
				}
				auto j = fill_from;
				/* ...then which eye to start from */
				auto eyes = _last_video_eyes.get_value_or(Eyes::LEFT);
				if (eyes == Eyes::BOTH) {
					eyes = Eyes::LEFT;
				}
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						auto copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					/* Only advance the time once both eyes of a frame have gone out */
					if (eyes == Eyes::RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: fill with repeats of the last frame we have, or black if there is none */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(Eyes::BOTH), j);
					}
				}
			}
		}
	}

	_last_video[wp] = piece->player_video (video, _video_container_size);

	/* Emit the frame, repeating it as many times as the frame rate change requires,
	   but never past the end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->end()) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
917
918
void
Player::audio (weak_ptr<Piece> wp, PieceAudio piece_audio)
{
	/* Handle a block of audio arriving from a Piece's decoder: trim it to the
	   content's period in the DCP, apply the content's gain and channel mapping,
	   run any audio processor, then push the result into the merger.
	*/
	DCPOMATIC_ASSERT (piece_audio.audio->frames() > 0);

	auto piece = wp.lock ();
	if (!piece) {
		return;
	}

	int const rfr = piece->resampled_audio_frame_rate ();

	/* Compute time in the DCP */
	auto time = piece->resampled_audio_to_dcp (piece_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", piece_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	auto end = time + DCPTime::from_frames(piece_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->position()) {
		auto cut = discard_audio (piece_audio.audio, time, piece->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		piece_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->end()) {
		/* Discard it all */
		return;
	} else if (end > piece->end()) {
		/* The block overlaps the end of the content: keep only the part inside */
		Frame const remaining_frames = DCPTime(piece->end() - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		piece_audio.audio = make_shared<AudioBuffers>(piece_audio.audio, remaining_frames, 0);
	}

	DCPOMATIC_ASSERT (piece_audio.audio->frames() > 0);

	/* Gain */

	if (piece->audio_gain() != 0) {
		/* Copy before applying gain so we don't modify the decoder's buffers */
		auto gain = make_shared<AudioBuffers>(piece_audio.audio);
		gain->apply_gain (piece->audio_gain());
		piece_audio.audio = gain;
	}

	/* Remap */

	piece_audio.audio = remap (piece_audio.audio, _film->audio_channels(), piece_audio.stream->mapping());

	/* Process */

	if (_audio_processor) {
		piece_audio.audio = _audio_processor->run (piece_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (piece_audio.audio, time);
	piece->set_last_push_end (piece_audio.stream, time + DCPTime::from_frames(piece_audio.audio->frames(), _film->audio_frame_rate()));
}
983
984
void
Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentBitmapText subtitle)
{
	/* Handle the start of a bitmap subtitle: apply the content's offsets and
	   scales to its rectangle, scale the image to fit _video_container_size,
	   and add the result to the active texts of the appropriate type.
	*/
	auto piece = wp.lock ();
	auto content = wc.lock ();
	auto text = wt.lock ();
	if (!piece || !content || !text) {
		return;
	}

	/* Apply content's subtitle offsets */
	subtitle.sub.rectangle.x += text->x_offset ();
	subtitle.sub.rectangle.y += text->y_offset ();

	/* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
	subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
	subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);

	/* Apply content's subtitle scale */
	subtitle.sub.rectangle.width *= text->x_scale ();
	subtitle.sub.rectangle.height *= text->y_scale ();

	PlayerText ps;
	auto image = subtitle.sub.image;

	/* We will scale the subtitle up to fit _video_container_size; the rectangle's
	   dimensions are proportions of the container size.
	*/
	int const width = subtitle.sub.rectangle.width * _video_container_size.width;
	int const height = subtitle.sub.rectangle.height * _video_container_size.height;
	if (width == 0 || height == 0) {
		/* Zero-sized subtitle: nothing to show */
		return;
	}

	dcp::Size scaled_size (width, height);
	ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
	auto from = piece->content_time_to_dcp(content, subtitle.from());
	DCPOMATIC_ASSERT (from);

	_active_texts[static_cast<int>(text->type())].add_from (wt, ps, *from);
}
1024
1025
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentStringText subtitle)
{
	/* Handle the start of some string (i.e. not bitmap) subtitles: apply the
	   content's position, scale and outline settings to each and add the result
	   to the active texts of the appropriate type.
	*/
	auto piece = wp.lock ();
	auto content = wc.lock ();
	auto text = wt.lock ();
	if (!piece || !content || !text) {
		return;
	}

	PlayerText ps;
	auto const from = piece->content_time_to_dcp(content, subtitle.from());
	DCPOMATIC_ASSERT (from);

	if (from > piece->end()) {
		/* Subtitle starts after the end of this piece; ignore it */
		return;
	}

	for (auto s: subtitle.subs) {
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling:
		   this multiplies size by max(xs, ys), so e.g. if
		   xs = ys = 0.5 the size is halved.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		s.set_in (dcp::Time(from->seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[static_cast<int>(text->type())].add_from (wt, ps, *from);
}
1071
1072
1073 void
1074 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentTime to)
1075 {
1076         auto content = wc.lock ();
1077         auto text = wt.lock ();
1078         if (!text) {
1079                 return;
1080         }
1081
1082         if (!_active_texts[static_cast<int>(text->type())].have(wt)) {
1083                 return;
1084         }
1085
1086         shared_ptr<Piece> piece = wp.lock ();
1087         if (!piece) {
1088                 return;
1089         }
1090
1091         auto const dcp_to = piece->content_time_to_dcp(content, to);
1092         DCPOMATIC_ASSERT (dcp_to);
1093
1094         if (*dcp_to > piece->end()) {
1095                 return;
1096         }
1097
1098         auto from = _active_texts[static_cast<int>(text->type())].add_to(wt, *dcp_to);
1099
1100         bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
1101         if (text->use() && !always && !text->burn()) {
1102                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, *dcp_to));
1103         }
1104 }
1105
1106
void
Player::seek (DCPTime time, bool accurate)
{
	/* Seek the player to @time, clearing out all queued/partially-processed
	   state.  If @accurate is true the next video emitted will be exactly the
	   frame at @time; otherwise it may be approximate.
	*/
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	if (_shuffler) {
		_shuffler->clear ();
	}

	/* Throw away the video delay queue and any buffered audio state */
	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
		_active_texts[i].clear ();
	}

	for (auto i: _pieces) {
		i->seek (time, accurate);
	}

	if (accurate) {
		/* We know exactly where we will resume, so anything earlier can be discarded */
		_last_video_time = time;
		_last_video_eyes = Eyes::LEFT;
		_last_audio_time = time;
	} else {
		/* We don't know exactly what will come next, so forget the old positions */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	_last_video.clear ();
}
1152
1153
1154 void
1155 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1156 {
1157         if (!_film->three_d()) {
1158                 if (pv->eyes() == Eyes::LEFT) {
1159                         /* Use left-eye images for both eyes... */
1160                         pv->set_eyes (Eyes::BOTH);
1161                 } else if (pv->eyes() == Eyes::RIGHT) {
1162                         /* ...and discard the right */
1163                         return;
1164                 }
1165         }
1166
1167         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1168            player before the video that requires them.
1169         */
1170         _delay.push_back (make_pair (pv, time));
1171
1172         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1173                 _last_video_time = time + one_video_frame();
1174         }
1175         _last_video_eyes = increment_eyes (pv->eyes());
1176
1177         if (_delay.size() < 3) {
1178                 return;
1179         }
1180
1181         auto to_do = _delay.front();
1182         _delay.pop_front();
1183         do_emit_video (to_do.first, to_do.second);
1184 }
1185
1186
1187 void
1188 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1189 {
1190         if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
1191                 for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
1192                         _active_texts[i].clear_before (time);
1193                 }
1194         }
1195
1196         auto subtitles = open_subtitles_for_frame (time);
1197         if (subtitles) {
1198                 pv->set_text (subtitles.get ());
1199         }
1200
1201         Video (pv, time);
1202 }
1203
1204
void
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
{
	/* Emit some audio via the Audio signal and update _last_audio_time.
	   Audio must be emitted in order, without gaps or overlaps.
	*/
	/* Log if the assert below is about to fail */
	if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
		_film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
	}

	/* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
	DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
	Audio (data, time, _film->audio_frame_rate());
	_last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
}
1218
1219
1220 void
1221 Player::fill_audio (DCPTimePeriod period)
1222 {
1223         if (period.from == period.to) {
1224                 return;
1225         }
1226
1227         DCPOMATIC_ASSERT (period.from < period.to);
1228
1229         DCPTime t = period.from;
1230         while (t < period.to) {
1231                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1232                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1233                 if (samples) {
1234                         auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
1235                         silence->make_silent ();
1236                         emit_audio (silence, t);
1237                 }
1238                 t += block;
1239         }
1240 }
1241
1242
DCPTime
Player::one_video_frame () const
{
	/* @return the duration of one video frame at the film's video frame rate */
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}
1248
1249
1250 pair<shared_ptr<AudioBuffers>, DCPTime>
1251 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1252 {
1253         auto const discard_time = discard_to - time;
1254         auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1255         auto remaining_frames = audio->frames() - discard_frames;
1256         if (remaining_frames <= 0) {
1257                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1258         }
1259         auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
1260         return make_pair(cut, time + discard_time);
1261 }
1262
1263
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	/* Set the decode resolution reduction used for DCP content, rebuilding our
	   pieces if it changed.  Emits Change PENDING then DONE, or CANCELLED if the
	   new value is the same as the old.
	*/
	Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change: unlock before emitting so handlers can safely call back into us */
			lm.unlock ();
			Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		setup_pieces_unlocked ();
	}

	Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1284
1285
1286 shared_ptr<const Playlist>
1287 Player::playlist () const
1288 {
1289         return _playlist ? _playlist : _film->playlist();
1290 }
1291
1292
void
Player::atmos (weak_ptr<Piece>, ContentAtmos data)
{
	/* Pass Atmos data from a Piece's decoder straight on via our Atmos signal,
	   converting the frame number to a DCP time.
	*/
	Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
}
1298