Add fade in/out option to the content audio tab (#1026).
[dcpomatic.git] / src / lib / player.cc
/*
    Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/


#include "atmos_decoder.h"
#include "audio_buffers.h"
#include "audio_content.h"
#include "audio_decoder.h"
#include "audio_processor.h"
#include "compose.hpp"
#include "config.h"
#include "content_audio.h"
#include "content_video.h"
#include "dcp_content.h"
#include "dcp_decoder.h"
#include "dcpomatic_log.h"
#include "decoder.h"
#include "decoder_factory.h"
#include "ffmpeg_content.h"
#include "film.h"
#include "frame_rate_change.h"
#include "image.h"
#include "image_decoder.h"
#include "job.h"
#include "log.h"
#include "maths_util.h"
#include "piece.h"
#include "player.h"
#include "player_video.h"
#include "playlist.h"
#include "ratio.h"
#include "raw_image_proxy.h"
#include "referenced_reel_asset.h"
#include "render_text.h"
#include "shuffler.h"
#include "text_content.h"
#include "text_decoder.h"
#include "timer.h"
#include "video_decoder.h"
#include <dcp/reel.h>
#include <dcp/reel_closed_caption_asset.h>
#include <dcp/reel_picture_asset.h>
#include <dcp/reel_sound_asset.h>
#include <dcp/reel_subtitle_asset.h>
#include <algorithm>
#include <iostream>
#include <stdint.h>

#include "i18n.h"


using std::copy;
using std::cout;
using std::dynamic_pointer_cast;
using std::list;
using std::make_pair;
using std::make_shared;
using std::max;
using std::min;
using std::pair;
using std::shared_ptr;
using std::vector;
using std::weak_ptr;
using boost::optional;
using boost::scoped_ptr;
#if BOOST_VERSION >= 106100
using namespace boost::placeholders;
#endif
using namespace dcpomatic;


int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;


Player::Player (shared_ptr<const Film> film, Image::Alignment subtitle_alignment)
        : _film (film)
        , _suspended (0)
        , _tolerant (film->tolerant())
        , _audio_merger (_film->audio_frame_rate())
        , _subtitle_alignment (subtitle_alignment)
{
        construct ();
}


Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
        : _film (film)
        , _playlist (playlist_)
        , _suspended (0)
        , _tolerant (film->tolerant())
        , _audio_merger (_film->audio_frame_rate())
{
        construct ();
}

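/** Code shared by the two constructors */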
void
Player::construct ()
{
        _film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
        /* The butler must hear about playlist changes first, so since we are proxying
           them through to the butler we must connect at the front.
        */
        _playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
        _playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
        set_video_container_size (_film->frame_size ());

        film_change (ChangeType::DONE, Film::Property::AUDIO_PROCESSOR);

        setup_pieces ();
        seek (DCPTime (), true);
}


void
Player::setup_pieces ()
{
        boost::mutex::scoped_lock lm (_mutex);
        setup_pieces_unlocked ();
}

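/** @return true if the content has video and that video is marked as being used */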
bool
have_video (shared_ptr<const Content> content)
{
        return static_cast<bool>(content->video) && content->video->use();
}

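/** @return true if the content has any audio */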
bool
have_audio (shared_ptr<const Content> content)
{
        return static_cast<bool>(content->audio);
}

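/** Rebuild _pieces from the current playlist; caller must hold a lock on _mutex */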
void
Player::setup_pieces_unlocked ()
{
        _playback_length = _playlist ? _playlist->length(_film) : _film->length();

        auto old_pieces = _pieces;
        _pieces.clear ();

        _shuffler.reset (new Shuffler());
        _shuffler->Video.connect(bind(&Player::video, this, _1, _2));

        for (auto i: playlist()->content()) {

                if (!i->paths_valid ()) {
                        continue;
                }

                if (_ignore_video && _ignore_audio && i->text.empty()) {
                        /* We're only interested in text and this content has none */
                        continue;
                }

                shared_ptr<Decoder> old_decoder;
                for (auto j: old_pieces) {
                        if (j->content == i) {
                                old_decoder = j->decoder;
                                break;
                        }
                }

                auto decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
                DCPOMATIC_ASSERT (decoder);

                FrameRateChange frc (_film, i);

                if (decoder->video && _ignore_video) {
                        decoder->video->set_ignore (true);
                }

                if (decoder->audio && _ignore_audio) {
                        decoder->audio->set_ignore (true);
                }

                if (_ignore_text) {
                        for (auto i: decoder->text) {
                                i->set_ignore (true);
                        }
                }

                auto dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
                if (dcp) {
                        dcp->set_decode_referenced (_play_referenced);
                        if (_play_referenced) {
                                dcp->set_forced_reduction (_dcp_decode_reduction);
                        }
                }

                auto piece = make_shared<Piece>(i, decoder, frc);
                _pieces.push_back (piece);

                if (decoder->video) {
                        if (i->video->frame_type() == VideoFrameType::THREE_D_LEFT || i->video->frame_type() == VideoFrameType::THREE_D_RIGHT) {
                                /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
                                decoder->video->Data.connect (bind(&Shuffler::video, _shuffler.get(), weak_ptr<Piece>(piece), _1));
                        } else {
                                decoder->video->Data.connect (bind(&Player::video, this, weak_ptr<Piece>(piece), _1));
                        }
                }

                if (decoder->audio) {
                        decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
                }

                auto j = decoder->text.begin();

                while (j != decoder->text.end()) {
                        (*j)->BitmapStart.connect (
                                bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
                                );
                        (*j)->PlainStart.connect (
                                bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
                                );
                        (*j)->Stop.connect (
                                bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
                                );

                        ++j;
                }

                if (decoder->atmos) {
                        decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
                }
        }

        _stream_states.clear ();
        for (auto i: _pieces) {
                if (i->content->audio) {
                        for (auto j: i->content->audio->streams()) {
                                _stream_states[j] = StreamState (i, i->content->position ());
                        }
                }
        }

        for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
                if (auto video = (*i)->content->video) {
                        if (video->use() && video->frame_type() != VideoFrameType::THREE_D_LEFT && video->frame_type() != VideoFrameType::THREE_D_RIGHT) {
                                /* Look for content later in the content list with in-use video that overlaps this */
                                auto period = DCPTimePeriod((*i)->content->position(), (*i)->content->end(_film));
                                auto j = i;
                                ++j;
                                for (; j != _pieces.end(); ++j) {
                                        if ((*j)->content->video && (*j)->content->video->use()) {
                                                (*i)->ignore_video = DCPTimePeriod((*j)->content->position(), (*j)->content->end(_film)).overlap(period);
                                        }
                                }
                        }
                }
        }

        _black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
        _silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

        _next_video_time = boost::none;
        _next_video_eyes = Eyes::BOTH;
        _next_audio_time = boost::none;
}

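/** Respond to a change in one of our playlist's content.  For most properties we
 *  suspend the player while the change is pending and rebuild our pieces when it
 *  is done; crop changes just update the metadata of any delayed frames.
 */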
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
        if (property == VideoContentProperty::CROP) {
                if (type == ChangeType::DONE) {
                        auto const vcs = video_container_size();
                        boost::mutex::scoped_lock lm (_mutex);
                        for (auto const& i: _delay) {
                                i.first->reset_metadata (_film, vcs);
                        }
                }
        } else {
                if (type == ChangeType::PENDING) {
                        /* The player content is probably about to change, so we can't carry on
                           until that has happened and we've rebuilt our pieces.  Stop pass()
                           and seek() from working until then.
                        */
                        ++_suspended;
                } else if (type == ChangeType::DONE) {
                        /* A change in our content has gone through.  Re-build our pieces. */
                        setup_pieces ();
                        --_suspended;
                } else if (type == ChangeType::CANCELLED) {
                        --_suspended;
                }
        }

        Change (type, property, frequent);
}


void
Player::set_video_container_size (dcp::Size s)
{
        Change (ChangeType::PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

        {
                boost::mutex::scoped_lock lm (_mutex);

                if (s == _video_container_size) {
                        lm.unlock ();
                        Change (ChangeType::CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
                        return;
                }

                _video_container_size = s;

                _black_image = make_shared<Image>(AV_PIX_FMT_RGB24, _video_container_size, Image::Alignment::PADDED);
                _black_image->make_black ();
        }

        Change (ChangeType::DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}


void
Player::playlist_change (ChangeType type)
{
        if (type == ChangeType::DONE) {
                setup_pieces ();
        }
        Change (type, PlayerProperty::PLAYLIST, false);
}


void
Player::film_change (ChangeType type, Film::Property p)
{
        /* Here we should notice Film properties that affect our output, and
           alert listeners that our output now would be different to how it was
           last time we were run.
        */

        if (p == Film::Property::CONTAINER) {
                Change (type, PlayerProperty::FILM_CONTAINER, false);
        } else if (p == Film::Property::VIDEO_FRAME_RATE) {
                /* Pieces contain a FrameRateChange which contains the DCP frame rate,
                   so we need new pieces here.
                */
                if (type == ChangeType::DONE) {
                        setup_pieces ();
                }
                Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
        } else if (p == Film::Property::AUDIO_PROCESSOR) {
                if (type == ChangeType::DONE && _film->audio_processor ()) {
                        boost::mutex::scoped_lock lm (_mutex);
                        _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
                }
        } else if (p == Film::Property::AUDIO_CHANNELS) {
                if (type == ChangeType::DONE) {
                        boost::mutex::scoped_lock lm (_mutex);
                        _audio_merger.clear ();
                }
        }
}


shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
        return std::make_shared<PlayerVideo> (
                std::make_shared<const RawImageProxy>(_black_image),
                Crop(),
                optional<double>(),
                _video_container_size,
                _video_container_size,
                eyes,
                Part::WHOLE,
                PresetColourConversion::all().front().conversion,
                VideoRange::FULL,
                std::weak_ptr<Content>(),
                boost::optional<Frame>(),
                false
        );
}


Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
        auto s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(_film), s);
        s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

        /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
           then convert that ContentTime to frames at the content's rate.  However this fails for
           situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
           enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

           Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
        */
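        /* Illustrative example (assuming FrameRateChange::factor() is 0.5 when
           skipping and the repeat count otherwise): 48fps content in a 24fps DCP
           skips alternate frames, so a DCPTime of 1s gives 24 DCP frames and hence
           content frame 48; 12fps content repeated into a 24fps DCP gives frame 12.
        */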
        return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}


DCPTime
Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
        /* See comment in dcp_to_content_video */
        auto const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
        return d + piece->content->position();
}


Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
        auto s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(_film), s);
        /* See notes in dcp_to_content_video */
        return max (DCPTime(), DCPTime(piece->content->trim_start(), piece->frc) + s).frames_floor(_film->audio_frame_rate());
}


DCPTime
Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
        /* See comment in dcp_to_content_video */
        return DCPTime::from_frames (f, _film->audio_frame_rate())
                - DCPTime (piece->content->trim_start(), piece->frc)
                + piece->content->position();
}


ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
        auto s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(_film), s);
        return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}


DCPTime
Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
{
        return max (DCPTime(), DCPTime(t - piece->content->trim_start(), piece->frc) + piece->content->position());
}


vector<FontData>
Player::get_subtitle_fonts ()
{
        boost::mutex::scoped_lock lm (_mutex);

        vector<FontData> fonts;
        for (auto i: _pieces) {
                /* XXX: things may go wrong if there are duplicate font IDs
                   with different font files.
                */
                auto f = i->decoder->fonts ();
                copy (f.begin(), f.end(), back_inserter(fonts));
        }

        return fonts;
}


/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _ignore_video = true;
        setup_pieces_unlocked ();
}


void
Player::set_ignore_audio ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _ignore_audio = true;
        setup_pieces_unlocked ();
}


void
Player::set_ignore_text ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _ignore_text = true;
        setup_pieces_unlocked ();
}


/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _always_burn_open_subtitles = true;
}


/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _fast = true;
        setup_pieces_unlocked ();
}


void
Player::set_play_referenced ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _play_referenced = true;
        setup_pieces_unlocked ();
}

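/** Trim @a r by @a reel_trim_start and @a reel_trim_end frames and, if any duration
 *  remains, add it to the list @a a positioned at @a from.  @a ffr is the film's
 *  video frame rate.
 */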
static void
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
{
        DCPOMATIC_ASSERT (r);
        r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
        r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
        if (r->actual_duration() > 0) {
                a.push_back (
                        ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
                        );
        }
}


list<ReferencedReelAsset>
Player::get_reel_assets ()
{
        /* Does not require a lock on _mutex as it's only called from DCPEncoder */

        list<ReferencedReelAsset> reel_assets;

        for (auto content: playlist()->content()) {
                auto dcp = dynamic_pointer_cast<DCPContent>(content);
                if (!dcp) {
                        continue;
                }

                if (!dcp->reference_video() && !dcp->reference_audio() && !dcp->reference_text(TextType::OPEN_SUBTITLE) && !dcp->reference_text(TextType::CLOSED_CAPTION)) {
                        continue;
                }

                scoped_ptr<DCPDecoder> decoder;
                try {
                        decoder.reset (new DCPDecoder(_film, dcp, false, false, shared_ptr<DCPDecoder>()));
                } catch (...) {
                        return reel_assets;
                }

                auto const frame_rate = _film->video_frame_rate();
                DCPOMATIC_ASSERT (dcp->video_frame_rate());
                /* We should only be referencing if the DCP rate is the same as the film rate */
                DCPOMATIC_ASSERT (std::round(dcp->video_frame_rate().get()) == frame_rate);

                Frame const trim_start = dcp->trim_start().frames_round(frame_rate);
                Frame const trim_end = dcp->trim_end().frames_round(frame_rate);

                /* position in the asset from the start */
                int64_t offset_from_start = 0;
                /* position in the asset from the end */
                int64_t offset_from_end = 0;
                for (auto reel: decoder->reels()) {
                        /* Assume that main picture duration is the length of the reel */
                        offset_from_end += reel->main_picture()->actual_duration();
                }

                for (auto reel: decoder->reels()) {

                        /* Assume that main picture duration is the length of the reel */
                        int64_t const reel_duration = reel->main_picture()->actual_duration();

                        /* See doc/design/trim_reels.svg */
                        Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
                        Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

                        auto const from = max(DCPTime(), content->position() + DCPTime::from_frames(offset_from_start, frame_rate) - DCPTime::from_frames(trim_start, frame_rate));
                        if (dcp->reference_video()) {
                                maybe_add_asset (reel_assets, reel->main_picture(), reel_trim_start, reel_trim_end, from, frame_rate);
                        }

                        if (dcp->reference_audio()) {
                                maybe_add_asset (reel_assets, reel->main_sound(), reel_trim_start, reel_trim_end, from, frame_rate);
                        }

                        if (dcp->reference_text(TextType::OPEN_SUBTITLE)) {
                                maybe_add_asset (reel_assets, reel->main_subtitle(), reel_trim_start, reel_trim_end, from, frame_rate);
                        }

                        if (dcp->reference_text(TextType::CLOSED_CAPTION)) {
                                for (auto caption: reel->closed_captions()) {
                                        maybe_add_asset (reel_assets, caption, reel_trim_start, reel_trim_end, from, frame_rate);
                                }
                        }

                        offset_from_start += reel_duration;
                        offset_from_end -= reel_duration;
                }
        }

        return reel_assets;
}

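/** Ask the piece (or gap) which is furthest behind to emit some data, then emit
 *  any audio which is now definitely complete.
 *  @return true when playback has reached the end.
 */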
bool
Player::pass ()
{
        boost::mutex::scoped_lock lm (_mutex);

        if (_suspended) {
                /* We can't pass in this state */
                LOG_DEBUG_PLAYER_NC ("Player is suspended");
                return false;
        }

        if (_playback_length == DCPTime()) {
                /* Special; just give one black frame */
                emit_video (black_player_video_frame(Eyes::BOTH), DCPTime());
                return true;
        }

        /* Find the decoder or empty which is farthest behind where we are and make it emit some data */

        shared_ptr<Piece> earliest_content;
        optional<DCPTime> earliest_time;

        for (auto i: _pieces) {
                if (i->done) {
                        continue;
                }

                auto const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
                if (t > i->content->end(_film)) {
                        i->done = true;
                } else {

                        /* Given two choices at the same time, pick the one with texts so we see it before
                           the video.
                        */
                        if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
                                earliest_time = t;
                                earliest_content = i;
                        }
                }
        }

        bool done = false;

        enum {
                NONE,
                CONTENT,
                BLACK,
                SILENT
        } which = NONE;

        if (earliest_content) {
                which = CONTENT;
        }

        if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
                earliest_time = _black.position ();
                which = BLACK;
        }

        if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
                earliest_time = _silent.position ();
                which = SILENT;
        }

        switch (which) {
        case CONTENT:
        {
                LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
                earliest_content->done = earliest_content->decoder->pass ();
                auto dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
                if (dcp && !_play_referenced && dcp->reference_audio()) {
                        /* We are skipping some referenced DCP audio content, so we need to update _next_audio_time
                           to `hide' the fact that no audio was emitted during the referenced DCP (though
                           we need to behave as though it was).
                        */
                        _next_audio_time = dcp->end (_film);
                }
                break;
        }
        case BLACK:
                LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
                emit_video (black_player_video_frame(Eyes::BOTH), _black.position());
                _black.set_position (_black.position() + one_video_frame());
                break;
        case SILENT:
        {
                LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
                DCPTimePeriod period (_silent.period_at_position());
                if (_next_audio_time) {
                        /* Sometimes the thing that happened last finishes fractionally before
                           or after this silence.  Bodge the start time of the silence to fix it.
                           I think this is nothing to worry about since we will just add or
                           remove a little silence at the end of some content.
                        */
                        int64_t const error = labs(period.from.get() - _next_audio_time->get());
                        /* Let's not worry about less than a frame at 24fps */
                        int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
                        if (error >= too_much_error) {
                                _film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
                        }
                        DCPOMATIC_ASSERT (error < too_much_error);
                        period.from = *_next_audio_time;
                }
                if (period.duration() > one_video_frame()) {
                        period.to = period.from + one_video_frame();
                }
                fill_audio (period);
                _silent.set_position (period.to);
                break;
        }
        case NONE:
                done = true;
                break;
        }

        /* Emit any audio that is ready */

        /* Work out the time before which the audio is definitely all here.  This is the earliest
           last_push_end of one of our streams, or the position of _silent.  First, though, we choose
           only streams that are less than ignore_streams_behind seconds behind the furthest ahead
           (we assume that if a stream has fallen that far behind it has finished).  This is so that
           we don't withhold audio indefinitely awaiting data from a stream that will never come,
           causing bugs like #2101.
        */
        constexpr int ignore_streams_behind = 5;

        using state_pair = std::pair<AudioStreamPtr, StreamState>;

        /* Find the 'leading' stream (i.e. the one that pushed data most recently) */
        auto latest_last_push_end = std::max_element(
                _stream_states.begin(),
                _stream_states.end(),
                [](state_pair const& a, state_pair const& b) { return a.second.last_push_end < b.second.last_push_end; }
                );

        if (latest_last_push_end != _stream_states.end()) {
                LOG_DEBUG_PLAYER("Leading stream is in %1 at %2", latest_last_push_end->second.piece->content->path(0), to_string(latest_last_push_end->second.last_push_end));
        }

        /* Now make a list of those streams that are less than ignore_streams_behind behind the leader */
        std::map<AudioStreamPtr, StreamState> alive_stream_states;
        for (auto const& i: _stream_states) {
                if ((latest_last_push_end->second.last_push_end - i.second.last_push_end) < dcpomatic::DCPTime::from_seconds(ignore_streams_behind)) {
                        alive_stream_states.insert(i);
                } else {
                        LOG_DEBUG_PLAYER("Ignoring stream %1 because it is too far behind", i.second.piece->content->path(0));
                }
        }

        auto pull_to = _playback_length;
        for (auto const& i: alive_stream_states) {
                if (!i.second.piece->done && i.second.last_push_end < pull_to) {
                        pull_to = i.second.last_push_end;
                }
        }
        if (!_silent.done() && _silent.position() < pull_to) {
                pull_to = _silent.position();
        }

        LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
        auto audio = _audio_merger.pull (pull_to);
        for (auto i = audio.begin(); i != audio.end(); ++i) {
                if (_next_audio_time && i->second < *_next_audio_time) {
                        /* This new data comes before the last we emitted (or the last seek); discard it */
                        auto cut = discard_audio (i->first, i->second, *_next_audio_time);
                        if (!cut.first) {
                                continue;
                        }
                        *i = cut;
                } else if (_next_audio_time && i->second > *_next_audio_time) {
                        /* There's a gap between this data and the last we emitted; fill with silence */
                        fill_audio (DCPTimePeriod (*_next_audio_time, i->second));
                }

                emit_audio (i->first, i->second);
        }

        if (done) {
                _shuffler->flush ();
                for (auto const& i: _delay) {
                        do_emit_video(i.first, i.second);
                }

                /* Perhaps we should have Empty entries for both eyes in the 3D case (somehow).
                 * However, if we have L and R video files, and one is shorter than the other,
                 * the fill code in ::video mostly takes care of filling in the gaps.
                 * Since it fills at the point when it knows there is more video coming at time t
                 * (so it should fill any gap up to t) it can't do anything right at the end, though.
                 * This is particularly bad news if the last frame emitted is a LEFT
                 * eye, as the MXF writer will complain about the 3D sequence being wrong.
                 * Here's a hack to work around that particular case.
                 */
                if (_next_video_eyes && _next_video_time && *_next_video_eyes == Eyes::RIGHT) {
                        do_emit_video (black_player_video_frame(Eyes::RIGHT), *_next_video_time);
                }
        }

        return done;
}

/** @return Open subtitles for the frame at the given time, converted to images */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
        list<PositionImage> captions;
        int const vfr = _film->video_frame_rate();

        for (
                auto j:
                _active_texts[static_cast<int>(TextType::OPEN_SUBTITLE)].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
                ) {

                /* Bitmap subtitles */
                for (auto i: j.bitmap) {
                        if (!i.image) {
                                continue;
                        }

                        /* i.image will already have been scaled to fit _video_container_size */
                        dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

                        captions.push_back (
                                PositionImage (
                                        i.image,
                                        Position<int> (
                                                lrint(_video_container_size.width * i.rectangle.x),
                                                lrint(_video_container_size.height * i.rectangle.y)
                                                )
                                        )
                                );
                }

                /* String subtitles (rendered to an image) */
                if (!j.string.empty()) {
                        auto s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
                        copy (s.begin(), s.end(), back_inserter (captions));
                }
        }

        if (captions.empty()) {
                return {};
        }

        return merge (captions, _subtitle_alignment);
}

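/** Handle a video frame arriving from a decoder: fill any gap since the last frame
 *  we emitted, then emit this one (repeated if the frame rate change requires it).
 */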
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
        if (_suspended) {
                return;
        }

        auto piece = wp.lock ();
        if (!piece) {
                return;
        }

        if (!piece->content->video->use()) {
                return;
        }

        FrameRateChange frc (_film, piece->content);
        if (frc.skip && (video.frame % 2) == 1) {
                return;
        }

        /* Time of the first frame we will emit */
        DCPTime const time = content_video_to_dcp (piece, video.frame);
        LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

        /* Discard if it's before the content's period or the last accurate seek.  We can't discard
           if it's after the content's period here as in that case we still need to fill any gap between
           `now' and the end of the content's period.
        */
        if (time < piece->content->position() || (_next_video_time && time < *_next_video_time)) {
                return;
        }

        if (piece->ignore_video && piece->ignore_video->contains(time)) {
                return;
        }

        /* Fill gaps that we discover now that we have some video which needs to be emitted.
           This is where we need to fill to.
        */
        DCPTime fill_to = min (time, piece->content->end(_film));

        if (_next_video_time) {
                DCPTime fill_from = max (*_next_video_time, piece->content->position());

                /* Fill if we have more than half a frame to do */
                if ((fill_to - fill_from) > one_video_frame() / 2) {
                        auto last = _last_video.find (wp);
                        if (_film->three_d()) {
                                auto fill_to_eyes = video.eyes;
                                if (fill_to_eyes == Eyes::BOTH) {
                                        fill_to_eyes = Eyes::LEFT;
                                }
                                if (fill_to == piece->content->end(_film)) {
                                        /* Don't fill after the end of the content */
                                        fill_to_eyes = Eyes::LEFT;
                                }
                                auto j = fill_from;
                                auto eyes = _next_video_eyes.get_value_or(Eyes::LEFT);
                                if (eyes == Eyes::BOTH) {
                                        eyes = Eyes::LEFT;
                                }
                                while (j < fill_to || eyes != fill_to_eyes) {
                                        if (last != _last_video.end()) {
                                                LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
                                                auto copy = last->second->shallow_copy();
                                                copy->set_eyes (eyes);
                                                emit_video (copy, j);
                                        } else {
                                                LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
                                                emit_video (black_player_video_frame(eyes), j);
                                        }
                                        if (eyes == Eyes::RIGHT) {
                                                j += one_video_frame();
                                        }
                                        eyes = increment_eyes (eyes);
                                }
                        } else {
                                for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
                                        if (last != _last_video.end()) {
                                                emit_video (last->second, j);
                                        } else {
                                                emit_video (black_player_video_frame(Eyes::BOTH), j);
                                        }
                                }
                        }
                }
        }

        auto const content_video = piece->content->video;

        _last_video[wp] = std::make_shared<PlayerVideo>(
                video.image,
                content_video->actual_crop(),
                content_video->fade (_film, video.frame),
                scale_for_display(
                        content_video->scaled_size(_film->frame_size()),
                        _video_container_size,
                        _film->frame_size(),
                        content_video->pixel_quanta()
                        ),
                _video_container_size,
                video.eyes,
                video.part,
                content_video->colour_conversion(),
                content_video->range(),
                piece->content,
                video.frame,
                false
                );

        DCPTime t = time;
        for (int i = 0; i < frc.repeat; ++i) {
                if (t < piece->content->end(_film)) {
                        emit_video (_last_video[wp], t);
                }
                t += one_video_frame ();
        }
}

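/** Handle a block of audio arriving from a decoder: trim it to the content's period,
 *  apply gain, fade and channel remapping, then push it into the merger.
 */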
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
        if (_suspended) {
                return;
        }

        DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

        auto piece = wp.lock ();
        if (!piece) {
                return;
        }

        auto content = piece->content->audio;
        DCPOMATIC_ASSERT (content);

        int const rfr = content->resampled_frame_rate (_film);

        /* Compute time in the DCP */
        auto time = resampled_audio_to_dcp (piece, content_audio.frame);
        LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

        /* And the end of this block in the DCP */
        auto end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

        /* Remove anything that comes before the start or after the end of the content */
        if (time < piece->content->position()) {
                auto cut = discard_audio (content_audio.audio, time, piece->content->position());
                if (!cut.first) {
                        /* This audio is entirely discarded */
                        return;
                }
                content_audio.audio = cut.first;
                time = cut.second;
        } else if (time > piece->content->end(_film)) {
                /* Discard it all */
                return;
        } else if (end > piece->content->end(_film)) {
                Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
                if (remaining_frames == 0) {
                        return;
                }
                content_audio.audio = make_shared<AudioBuffers>(content_audio.audio, remaining_frames, 0);
        }

        DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

        /* Gain and fade */

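        /* fade() gives per-frame coefficients when a fade is in progress (and an
           empty vector otherwise).  db_to_linear() converts the content gain from
           dB to a linear multiplier, i.e. 10^(dB/20), so e.g. -6dB is roughly 0.5.
        */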
        auto const fade_coeffs = content->fade (content_audio.frame, content_audio.audio->frames(), rfr);
        if (content->gain() != 0 || !fade_coeffs.empty()) {
                auto gain_buffers = make_shared<AudioBuffers>(content_audio.audio);
                if (!fade_coeffs.empty()) {
                        /* Apply both fade and gain */
                        DCPOMATIC_ASSERT (fade_coeffs.size() == static_cast<size_t>(gain_buffers->frames()));
                        auto const channels = gain_buffers->channels();
                        auto const frames = fade_coeffs.size();
                        auto data = gain_buffers->data();
                        auto const gain = db_to_linear (content->gain());
                        for (auto channel = 0; channel < channels; ++channel) {
                                for (auto frame = 0U; frame < frames; ++frame) {
                                        data[channel][frame] *= gain * fade_coeffs[frame];
                                }
                        }
                } else {
                        /* Just apply gain */
                        gain_buffers->apply_gain (content->gain());
                }
                content_audio.audio = gain_buffers;
        }

        /* Remap */

        content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

        /* Process */

        if (_audio_processor) {
                content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
        }

        /* Push */

        _audio_merger.push (content_audio.audio, time);
        DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
        _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}

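/** Handle the start of a bitmap subtitle: apply the content's offsets and scales,
 *  scale the image to fit _video_container_size and add it to the active texts.
 */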
void
Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
{
        if (_suspended) {
                return;
        }

        auto piece = wp.lock ();
        auto text = wc.lock ();
        if (!piece || !text) {
                return;
        }

        /* Apply content's subtitle offsets */
        subtitle.sub.rectangle.x += text->x_offset ();
        subtitle.sub.rectangle.y += text->y_offset ();

        /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
        subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
        subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);

        /* Apply content's subtitle scale */
        subtitle.sub.rectangle.width *= text->x_scale ();
        subtitle.sub.rectangle.height *= text->y_scale ();

        PlayerText ps;
        auto image = subtitle.sub.image;

        /* We will scale the subtitle up to fit _video_container_size */
        int const width = subtitle.sub.rectangle.width * _video_container_size.width;
        int const height = subtitle.sub.rectangle.height * _video_container_size.height;
        if (width == 0 || height == 0) {
                return;
        }

        dcp::Size scaled_size (width, height);
        ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), Image::Alignment::PADDED, _fast), subtitle.sub.rectangle));
        DCPTime from (content_time_to_dcp (piece, subtitle.from()));

        _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
}

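/** Handle the start of a plain-text subtitle: apply the content's position, scale
 *  and outline settings and add it to the active texts.
 */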
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
        if (_suspended) {
                return;
        }

        auto piece = wp.lock ();
        auto text = wc.lock ();
        if (!piece || !text) {
                return;
        }

        PlayerText ps;
        DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

        if (from > piece->content->end(_film)) {
                return;
        }

        for (auto s: subtitle.subs) {
                s.set_h_position (s.h_position() + text->x_offset ());
                s.set_v_position (s.v_position() + text->y_offset ());
                float const xs = text->x_scale();
                float const ys = text->y_scale();
                float size = s.size();

                /* Adjust size to express the common part of the scaling;
                   e.g. if xs = ys = 0.5 we scale size by 0.5.
                */
                if (xs > 1e-5 && ys > 1e-5) {
                        size *= 1 / min (1 / xs, 1 / ys);
                }
                s.set_size (size);

                /* Then express aspect ratio changes */
                if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
                        s.set_aspect_adjust (xs / ys);
                }

                s.set_in (dcp::Time(from.seconds(), 1000));
                ps.string.push_back (StringText (s, text->outline_width()));
                ps.add_fonts (text->fonts ());
        }

        _active_texts[static_cast<int>(text->type())].add_from (wc, ps, from);
}

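/** Handle the end of a subtitle: close it off in the active list and, if it is not
 *  being burnt into the image, emit it via the Text signal.
 */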
void
Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
{
        if (_suspended) {
                return;
        }

        auto text = wc.lock ();
        if (!text) {
                return;
        }

        if (!_active_texts[static_cast<int>(text->type())].have(wc)) {
                return;
        }

        shared_ptr<Piece> piece = wp.lock ();
        if (!piece) {
                return;
        }

        DCPTime const dcp_to = content_time_to_dcp (piece, to);

        if (dcp_to > piece->content->end(_film)) {
                return;
        }

        auto from = _active_texts[static_cast<int>(text->type())].add_to (wc, dcp_to);

        bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
        if (text->use() && !always && !text->burn()) {
                Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
        }
}

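/** Seek to @a time.  If @a accurate is true the next data emitted will be exactly
 *  at @a time; if false it may start a little before.
 */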
void
Player::seek (DCPTime time, bool accurate)
{
        boost::mutex::scoped_lock lm (_mutex);
        LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

        if (_suspended) {
                /* We can't seek in this state */
                return;
        }

        if (_shuffler) {
                _shuffler->clear ();
        }

        _delay.clear ();

        if (_audio_processor) {
                _audio_processor->flush ();
        }

        _audio_merger.clear ();
        for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
                _active_texts[i].clear ();
        }

        for (auto i: _pieces) {
                if (time < i->content->position()) {
                        /* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
                           we must seek this (following) content accurately, otherwise when we come to the end of the current
                           content we may not start right at the beginning of the next, causing a gap (if the next content has
                           been trimmed to a point between keyframes, or something).
                        */
                        i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
                        i->done = false;
                } else if (i->content->position() <= time && time < i->content->end(_film)) {
                        /* During; seek to position */
                        i->decoder->seek (dcp_to_content_time (i, time), accurate);
                        i->done = false;
                } else {
                        /* After; this piece is done */
                        i->done = true;
                }
        }

        if (accurate) {
                _next_video_time = time;
                _next_video_eyes = Eyes::LEFT;
                _next_audio_time = time;
        } else {
                _next_video_time = boost::none;
                _next_video_eyes = boost::none;
                _next_audio_time = boost::none;
        }

        _black.set_position (time);
        _silent.set_position (time);

        _last_video.clear ();
}

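/** Queue a video frame for emission.  Frames are held briefly in _delay so that any
 *  subtitles which are needed for a frame can arrive before it is emitted.
 */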
void
Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
{
        if (!_film->three_d()) {
                if (pv->eyes() == Eyes::LEFT) {
                        /* Use left-eye images for both eyes... */
                        pv->set_eyes (Eyes::BOTH);
                } else if (pv->eyes() == Eyes::RIGHT) {
                        /* ...and discard the right */
                        return;
                }
        }

        /* We need a delay to give a little wiggle room to ensure that relevant subtitles arrive at the
           player before the video that requires them.
        */
        _delay.push_back (make_pair (pv, time));

        if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
                _next_video_time = time + one_video_frame();
        }
        _next_video_eyes = increment_eyes (pv->eyes());

        if (_delay.size() < 3) {
                return;
        }

        auto to_do = _delay.front();
        _delay.pop_front();
        do_emit_video (to_do.first, to_do.second);
}


void
Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
{
        if (pv->eyes() == Eyes::BOTH || pv->eyes() == Eyes::RIGHT) {
                for (int i = 0; i < static_cast<int>(TextType::COUNT); ++i) {
                        _active_texts[i].clear_before (time);
                }
        }

        auto subtitles = open_subtitles_for_frame (time);
        if (subtitles) {
                pv->set_text (subtitles.get ());
        }

        Video (pv, time);
}


void
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
{
        /* Log if the assert below is about to fail */
        if (_next_audio_time && labs(time.get() - _next_audio_time->get()) > 1) {
                _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_next_audio_time)), LogEntry::TYPE_WARNING);
        }

        /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
        DCPOMATIC_ASSERT (!_next_audio_time || labs(time.get() - _next_audio_time->get()) < 2);
        Audio (data, time, _film->audio_frame_rate());
        _next_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
}

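/** Emit silence to fill the given period, in blocks of up to half a second */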
void
Player::fill_audio (DCPTimePeriod period)
{
        if (period.from == period.to) {
                return;
        }

        DCPOMATIC_ASSERT (period.from < period.to);

        DCPTime t = period.from;
        while (t < period.to) {
                DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
                Frame const samples = block.frames_round(_film->audio_frame_rate());
                if (samples) {
                        auto silence = make_shared<AudioBuffers>(_film->audio_channels(), samples);
                        silence->make_silent ();
                        emit_audio (silence, t);
                }
                t += block;
        }
}


DCPTime
Player::one_video_frame () const
{
        return DCPTime::from_frames (1, _film->video_frame_rate ());
}

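/** Discard the part of some audio that comes before @a discard_to.
 *  @return the remaining audio and the time at which it now starts, or a null
 *  pointer if nothing remains.
 */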
pair<shared_ptr<AudioBuffers>, DCPTime>
Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
{
        auto const discard_time = discard_to - time;
        auto const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
        auto remaining_frames = audio->frames() - discard_frames;
        if (remaining_frames <= 0) {
                return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
        }
        auto cut = make_shared<AudioBuffers>(audio, remaining_frames, discard_frames);
        return make_pair(cut, time + discard_time);
}


void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
        Change (ChangeType::PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

        {
                boost::mutex::scoped_lock lm (_mutex);

                if (reduction == _dcp_decode_reduction) {
                        lm.unlock ();
                        Change (ChangeType::CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
                        return;
                }

                _dcp_decode_reduction = reduction;
                setup_pieces_unlocked ();
        }

        Change (ChangeType::DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}


optional<DCPTime>
Player::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t)
{
        boost::mutex::scoped_lock lm (_mutex);

        for (auto i: _pieces) {
                if (i->content == content) {
                        return content_time_to_dcp (i, t);
                }
        }

        /* We couldn't find this content; perhaps things are being changed over */
        return {};
}


optional<ContentTime>
Player::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t)
{
        boost::mutex::scoped_lock lm (_mutex);

        for (auto i: _pieces) {
                if (i->content == content) {
                        return dcp_to_content_time (i, t);
                }
        }

        /* We couldn't find this content; perhaps things are being changed over */
        return {};
}


shared_ptr<const Playlist>
Player::playlist () const
{
        return _playlist ? _playlist : _film->playlist();
}


void
Player::atmos (weak_ptr<Piece> weak_piece, ContentAtmos data)
{
        if (_suspended) {
                return;
        }

        auto piece = weak_piece.lock ();
        DCPOMATIC_ASSERT (piece);

        auto const vfr = _film->video_frame_rate();

        DCPTime const dcp_time = DCPTime::from_frames(data.frame, vfr) - DCPTime(piece->content->trim_start(), FrameRateChange(vfr, vfr));
        if (dcp_time < piece->content->position() || dcp_time >= (piece->content->end(_film))) {
                return;
        }

        Atmos (data.data, dcp_time, data.metadata);
}