std::shared_ptr
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "atmos_decoder.h"
22 #include "player.h"
23 #include "film.h"
24 #include "audio_buffers.h"
25 #include "content_audio.h"
26 #include "dcp_content.h"
27 #include "dcpomatic_log.h"
28 #include "job.h"
29 #include "image.h"
30 #include "raw_image_proxy.h"
31 #include "ratio.h"
32 #include "log.h"
33 #include "render_text.h"
34 #include "config.h"
35 #include "content_video.h"
36 #include "player_video.h"
37 #include "frame_rate_change.h"
38 #include "audio_processor.h"
39 #include "playlist.h"
40 #include "referenced_reel_asset.h"
41 #include "decoder_factory.h"
42 #include "decoder.h"
43 #include "video_decoder.h"
44 #include "audio_decoder.h"
45 #include "text_content.h"
46 #include "text_decoder.h"
47 #include "ffmpeg_content.h"
48 #include "audio_content.h"
49 #include "dcp_decoder.h"
50 #include "image_decoder.h"
51 #include "compose.hpp"
52 #include "shuffler.h"
53 #include "timer.h"
54 #include <dcp/reel.h>
55 #include <dcp/reel_sound_asset.h>
56 #include <dcp/reel_subtitle_asset.h>
57 #include <dcp/reel_picture_asset.h>
58 #include <dcp/reel_closed_caption_asset.h>
59 #include <boost/foreach.hpp>
60 #include <stdint.h>
61 #include <algorithm>
62 #include <iostream>
63
64 #include "i18n.h"
65
66 using std::list;
67 using std::cout;
68 using std::min;
69 using std::max;
70 using std::min;
71 using std::vector;
72 using std::pair;
73 using std::map;
74 using std::make_pair;
75 using std::copy;
76 using std::shared_ptr;
77 using std::weak_ptr;
78 using std::dynamic_pointer_cast;
79 using boost::optional;
80 using boost::scoped_ptr;
81 #if BOOST_VERSION >= 106100
82 using namespace boost::placeholders;
83 #endif
84 using namespace dcpomatic;
85
/* Property codes passed to the Change signal so observers can tell which
   aspect of the player's state/output has changed. */
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
int const PlayerProperty::PLAYBACK_LENGTH = 705;
92
/** Construct a Player which plays the whole of the given film's playlist.
 *  @param film Film whose content is to be played.
 */
Player::Player (shared_ptr<const Film> film)
	: _film (film)
	, _suspended (0)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _tolerant (film->tolerant())
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	construct ();
}
108
/** Construct a Player which plays a specific playlist rather than the film's own.
 *  @param film Film to use for global settings (frame rates, sizes etc.).
 *  @param playlist_ Playlist whose content is to be played.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist_)
	: _film (film)
	, _playlist (playlist_)
	, _suspended (0)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _tolerant (film->tolerant())
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	construct ();
}
125
/** Shared constructor body: wire up change signals, build pieces and seek to the start */
void
Player::construct ()
{
	/* Listen for film changes so our output can track them */
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = playlist()->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = playlist()->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor (if any) via the normal change path */
	film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);

	setup_pieces ();
	/* Accurate seek to the start so the first frame emitted is exact */
	seek (DCPTime (), true);
}
142
Player::~Player ()
{
	/* _shuffler is a raw owning pointer allocated in setup_pieces_unlocked() */
	delete _shuffler;
}
147
/** Rebuild _pieces from the playlist, taking the state mutex first */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
154
155
156 bool
157 have_video (shared_ptr<const Content> content)
158 {
159         return static_cast<bool>(content->video) && content->video->use();
160 }
161
162 bool
163 have_audio (shared_ptr<const Content> content)
164 {
165         return static_cast<bool>(content->audio);
166 }
167
/** Rebuild the list of Pieces (content + decoder pairs) from the playlist and
 *  re-connect all decoder signals.  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_playback_length = _playlist ? _playlist->length(_film) : _film->length();

	/* Keep the old pieces around so that we can re-use a decoder when its content is unchanged */
	list<shared_ptr<Piece> > old_pieces = _pieces;
	_pieces.clear ();

	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {

		if (!i->paths_valid ()) {
			/* Content files are missing; skip this content */
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		/* Find any old decoder for this content so decoder_factory can re-use it */
		shared_ptr<Decoder> old_decoder;
		BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
			if (j->content == i) {
				old_decoder = j->decoder;
				break;
			}
		}

		shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
		DCPOMATIC_ASSERT (decoder);

		FrameRateChange frc (_film, i);

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
				i->set_ignore (true);
			}
		}

		/* DCP content gets extra configuration for referenced (pass-through) playback */
		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Connect all three text signals (bitmap start / plain start / stop) for each text decoder */
		list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}

		if (decoder->atmos) {
			decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
		}
	}

	/* Record each audio stream's piece and start position so that pass() can
	   work out how far each stream has been pushed.
	*/
	_stream_states.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Periods with no video / no audio, to be filled with black / silence during pass() */
	_black = Empty (_film, playlist(), bind(&have_video, _1), _playback_length);
	_silent = Empty (_film, playlist(), bind(&have_audio, _1), _playback_length);

	/* Reset emission bookkeeping to the start of playback */
	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();
}
279
/** Handle a change to some content in the playlist.
 *  @param type PENDING / DONE / CANCELLED stage of the change.
 *  @param property Property which changed.
 *  @param frequent true if this change is happening rapidly (e.g. during a drag).
 */
void
Player::playlist_content_change (ChangeType type, int property, bool frequent)
{
	if (property == VideoContentProperty::CROP) {
		if (type == CHANGE_TYPE_DONE) {
			/* Crop changes do not need a rebuild of the pieces, but any video
			   frames we are holding in _delay must get the new metadata.
			*/
			dcp::Size const vcs = video_container_size();
			boost::mutex::scoped_lock lm (_mutex);
			for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
				i->first->reset_metadata (_film, vcs);
			}
		}
	} else {
		if (type == CHANGE_TYPE_PENDING) {
			/* The player content is probably about to change, so we can't carry on
			   until that has happened and we've rebuilt our pieces.  Stop pass()
			   and seek() from working until then.
			*/
			++_suspended;
		} else if (type == CHANGE_TYPE_DONE) {
			/* A change in our content has gone through.  Re-build our pieces. */
			setup_pieces ();
			--_suspended;
		} else if (type == CHANGE_TYPE_CANCELLED) {
			--_suspended;
		}
	}

	/* Proxy the change on to our own observers (e.g. the butler) */
	Change (type, property, frequent);
}
309
/** Set the size of the container that output video will be fitted into,
 *  rebuilding the black frame image to match.  Emits PENDING then either
 *  CANCELLED (no change) or DONE.
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* No change; unlock before signalling to avoid holding _mutex in handlers */
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Re-make the black frame at the new size */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	/* Signal after releasing the lock */
	Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
332
333 void
334 Player::playlist_change (ChangeType type)
335 {
336         if (type == CHANGE_TYPE_DONE) {
337                 setup_pieces ();
338         }
339         Change (type, PlayerProperty::PLAYLIST, false);
340 }
341
342 void
343 Player::film_change (ChangeType type, Film::Property p)
344 {
345         /* Here we should notice Film properties that affect our output, and
346            alert listeners that our output now would be different to how it was
347            last time we were run.
348         */
349
350         if (p == Film::CONTAINER) {
351                 Change (type, PlayerProperty::FILM_CONTAINER, false);
352         } else if (p == Film::VIDEO_FRAME_RATE) {
353                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
354                    so we need new pieces here.
355                 */
356                 if (type == CHANGE_TYPE_DONE) {
357                         setup_pieces ();
358                 }
359                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
360         } else if (p == Film::AUDIO_PROCESSOR) {
361                 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
362                         boost::mutex::scoped_lock lm (_mutex);
363                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
364                 }
365         } else if (p == Film::AUDIO_CHANNELS) {
366                 if (type == CHANGE_TYPE_DONE) {
367                         boost::mutex::scoped_lock lm (_mutex);
368                         _audio_merger.clear ();
369                 }
370         }
371 }
372
373 shared_ptr<PlayerVideo>
374 Player::black_player_video_frame (Eyes eyes) const
375 {
376         return shared_ptr<PlayerVideo> (
377                 new PlayerVideo (
378                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
379                         Crop (),
380                         optional<double> (),
381                         _video_container_size,
382                         _video_container_size,
383                         eyes,
384                         PART_WHOLE,
385                         PresetColourConversion::all().front().conversion,
386                         VIDEO_RANGE_FULL,
387                         std::weak_ptr<Content>(),
388                         boost::optional<Frame>(),
389                         false
390                 )
391         );
392 }
393
/** Convert a DCP time to a frame index in a piece's video content.
 *  @param piece Piece containing the content.
 *  @param t Time in the DCP.
 *  @return Frame index within the content, clamped to the content's trimmed range.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
410
411 DCPTime
412 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
413 {
414         /* See comment in dcp_to_content_video */
415         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
416         return d + piece->content->position();
417 }
418
/** Convert a DCP time to a frame index in a piece's audio, after resampling to the film rate.
 *  @param piece Piece containing the content.
 *  @param t Time in the DCP.
 *  @return Audio frame index at the film's audio frame rate.
 */
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
}
427
428 DCPTime
429 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
430 {
431         /* See comment in dcp_to_content_video */
432         return DCPTime::from_frames (f, _film->audio_frame_rate())
433                 - DCPTime (piece->content->trim_start(), piece->frc)
434                 + piece->content->position();
435 }
436
437 ContentTime
438 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
439 {
440         DCPTime s = t - piece->content->position ();
441         s = min (piece->content->length_after_trim(_film), s);
442         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
443 }
444
445 DCPTime
446 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
447 {
448         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
449 }
450
451 vector<FontData>
452 Player::get_subtitle_fonts ()
453 {
454         boost::mutex::scoped_lock lm (_mutex);
455
456         vector<FontData> fonts;
457         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
458                 /* XXX: things may go wrong if there are duplicate font IDs
459                    with different font files.
460                 */
461                 vector<FontData> f = i->decoder->fonts ();
462                 copy (f.begin(), f.end(), back_inserter(fonts));
463         }
464
465         return fonts;
466 }
467
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_video = true;
	/* Rebuild pieces so the decoders are told to ignore video */
	setup_pieces_unlocked ();
}
476
/** Set this player never to produce any audio data */
void
Player::set_ignore_audio ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_audio = true;
	/* Rebuild pieces so the decoders are told to ignore audio */
	setup_pieces_unlocked ();
}
484
/** Set this player never to produce any text (subtitle/caption) data */
void
Player::set_ignore_text ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_text = true;
	/* Rebuild pieces so the text decoders are told to ignore their data */
	setup_pieces_unlocked ();
}
492
/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
	boost::mutex::scoped_lock lm (_mutex);
	/* No piece rebuild needed; this only affects rendering of open subtitles */
	_always_burn_open_subtitles = true;
}
500
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_fast = true;
	/* Rebuild pieces so the decoders are created in fast mode */
	setup_pieces_unlocked ();
}
509
/** Set the player to decode and play content that would otherwise be referenced
 *  directly from an existing DCP.
 */
void
Player::set_play_referenced ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	/* Rebuild pieces so DCP decoders pick up the referenced-playback setting */
	setup_pieces_unlocked ();
}
517
/** Adjust a reel asset's entry point and duration for any trim overlapping its reel,
 *  then add it to a list if it still has any duration left.  Note that r is modified
 *  in place.
 *  @param a List to add to.
 *  @param r Asset to adjust and maybe add; must not be null.
 *  @param reel_trim_start Trim at the start of this reel, in frames at the asset's rate.
 *  @param reel_trim_end Trim at the end of this reel, in frames at the asset's rate.
 *  @param from Position of this reel in the DCP.
 *  @param ffr Film video frame rate.
 */
static void
maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
{
	DCPOMATIC_ASSERT (r);
	r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
	r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
	if (r->actual_duration() > 0) {
		a.push_back (
			ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
			);
	}
}
530
/** @return reel assets from the playlist's DCP content which should be referenced
 *  (re-used) rather than re-encoded, with their entry points and durations adjusted
 *  for any trims.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, playlist()->content()) {
		/* Only DCP content can be referenced */
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* NOTE(review): a failure for one piece of content abandons the whole scan
			   and returns whatever was gathered so far; check whether `continue' (skip
			   just this content) was the intent here.
			*/
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_CLOSED_CAPTION)) {
				BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
601
/** Run one pass of the player: find the decoder (or black/silent gap) which is
 *  furthest behind, make it emit some data, then emit any audio which is now
 *  guaranteed complete.
 *  @return true when playback has finished (or in the special zero-length case).
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		LOG_DEBUG_PLAYER_NC ("Player is suspended");
		return false;
	}

	if (_playback_length == DCPTime()) {
		/* Special; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		/* Where this piece's decoder currently is, in DCP time */
		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What we will do this pass: pass a decoder, emit black, emit silence, or nothing */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* A black or silent gap takes priority if it starts before the earliest content */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		LOG_DEBUG_PLAYER ("Calling pass() on %1", earliest_content->content->path(0));
		earliest_content->done = earliest_content->decoder->pass ();
		shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		LOG_DEBUG_PLAYER ("Emit black for gap at %1", to_string(_black.position()));
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		LOG_DEBUG_PLAYER ("Emit silence for gap at %1", to_string(_silent.position()));
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _playback_length;
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	LOG_DEBUG_PLAYER("Emitting audio up to %1", to_string(pull_to));
	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Playback has finished: flush any out-of-order 3D frames and the delay queue */
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
760
/** @return Open subtitles for the frame at the given time, converted to images */
optional<PositionImage>
Player::open_subtitles_for_frame (DCPTime time) const
{
	list<PositionImage> captions;
	int const vfr = _film->video_frame_rate();

	/* Gather everything which should be burnt in during this one-frame period */
	BOOST_FOREACH (
		PlayerText j,
		_active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
		) {

		/* Bitmap subtitles */
		BOOST_FOREACH (BitmapText i, j.bitmap) {
			if (!i.image) {
				continue;
			}

			/* i.image will already have been scaled to fit _video_container_size */
			dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);

			/* Convert the proportional rectangle position into pixel coordinates */
			captions.push_back (
				PositionImage (
					i.image,
					Position<int> (
						lrint (_video_container_size.width * i.rectangle.x),
						lrint (_video_container_size.height * i.rectangle.y)
						)
					)
				);
		}

		/* String subtitles (rendered to an image) */
		if (!j.string.empty ()) {
			list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
			copy (s.begin(), s.end(), back_inserter (captions));
		}
	}

	if (captions.empty ()) {
		/* Nothing to burn in at this time */
		return optional<PositionImage> ();
	}

	/* Combine all the images into one */
	return merge (captions);
}
806
/** Handler for video data emitted by one of our pieces' decoders.
 *  Discards out-of-range frames, fills any gap since the last video we emitted
 *  (repeating the previous frame from the piece, or black), then emits this
 *  frame (repeated if the frame-rate conversion requires it).
 *  @param wp Piece that the video arrived from.
 *  @param video The video data itself.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	if (!piece->content->video->use()) {
		return;
	}

	FrameRateChange frc (_film, piece->content);
	if (frc.skip && (video.frame % 2) == 1) {
		/* The rate conversion drops frames; skip every odd-numbered one */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_last_video_time) {
		/* Start filling from whichever is later: the last video we emitted,
		   or the start of this piece's content.
		*/
		DCPTime fill_from = max (*_last_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* Last frame that we emitted from this piece, if any, to repeat as filler */
			LastVideoMap::const_iterator last = _last_video.find (wp);
			if (_film->three_d()) {
				/* In 3D we fill eye-by-eye, tracking which eye is next */
				Eyes fill_to_eyes = video.eyes;
				if (fill_to_eyes == EYES_BOTH) {
					fill_to_eyes = EYES_LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = EYES_LEFT;
				}
				DCPTime j = fill_from;
				Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
				if (eyes == EYES_BOTH) {
					eyes = EYES_LEFT;
				}
				/* Loop over (time, eye) pairs until we reach both the fill-to
				   time and the fill-to eye.
				*/
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						/* Repeat the piece's last frame, re-tagged for this eye */
						LOG_DEBUG_PLAYER("Fill using last video at %1 in 3D mode", to_string(j));
						shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						LOG_DEBUG_PLAYER("Fill using black at %1 in 3D mode", to_string(j));
						emit_video (black_player_video_frame(eyes), j);
					}
					if (eyes == EYES_RIGHT) {
						/* Both eyes of this frame done; move on in time */
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				/* 2D: fill with repeats of the piece's last frame, or black */
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(EYES_BOTH), j);
					}
				}
			}
		}
	}

	/* Remember this frame so we can use it to fill any future gap from this piece */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (_film, video.frame),
			scale_for_display(piece->content->video->scaled_size(_film->frame_size()), _video_container_size, _film->frame_size()),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content->video->range(),
			piece->content,
			video.frame,
			false
			)
		);

	/* Emit the frame, repeated as the frame-rate change requires, but never
	   past the end of the content.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
913
/** Handler for audio data emitted by one of our pieces' decoders.
 *  Trims the block to the content's period, applies gain, remaps channels,
 *  runs any audio processor and pushes the result into the merger.
 *  @param wp Piece that the audio arrived from.
 *  @param stream Stream within the piece that the audio belongs to.
 *  @param content_audio The audio data itself.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	LOG_DEBUG_PLAYER("Received audio frame %1 at %2", content_audio.frame, to_string(time));

	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* The block straddles the end of the content; keep only the part inside */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		content_audio.audio.reset (new AudioBuffers(content_audio.audio, remaining_frames, 0));
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy the buffers so that we don't modify the data the decoder gave us */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	/* Record how far this stream has got, so that pass() can decide what to pull */
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
982
/** Handler for the start of a bitmap subtitle emitted by a decoder.
 *  Applies the content's offset/scale settings, scales the image to fit
 *  _video_container_size and records the subtitle as active from its start time.
 *  @param wp Piece that the subtitle arrived from.
 *  @param wc TextContent that the subtitle belongs to.
 *  @param subtitle The subtitle itself.
 */
void
Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
{
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
		return;
	}

	/* Apply content's subtitle offsets */
	subtitle.sub.rectangle.x += text->x_offset ();
	subtitle.sub.rectangle.y += text->y_offset ();

	/* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
	subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
	subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);

	/* Apply content's subtitle scale */
	subtitle.sub.rectangle.width *= text->x_scale ();
	subtitle.sub.rectangle.height *= text->y_scale ();

	PlayerText ps;
	shared_ptr<Image> image = subtitle.sub.image;

	/* We will scale the subtitle up to fit _video_container_size; the rectangle's
	   width/height are fractions of the container size.
	*/
	int const width = subtitle.sub.rectangle.width * _video_container_size.width;
	int const height = subtitle.sub.rectangle.height * _video_container_size.height;
	if (width == 0 || height == 0) {
		/* Degenerate (zero-sized) subtitle; nothing to show */
		return;
	}

	dcp::Size scaled_size (width, height);
	ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
	DCPTime from (content_time_to_dcp (piece, subtitle.from()));

	/* Mark the subtitle active from its start time; the matching subtitle_stop() will end it */
	_active_texts[text->type()].add_from (wc, ps, from);
}
1020
/** Handler for the start of a string (text) subtitle emitted by a decoder.
 *  Applies the content's offset/scale settings to each SubtitleString and
 *  records the subtitle as active from its start time.
 *  @param wp Piece that the subtitle arrived from.
 *  @param wc TextContent that the subtitle belongs to.
 *  @param subtitle The subtitle itself.
 */
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	if (from > piece->content->end(_film)) {
		/* Starts after the content has finished; ignore it */
		return;
	}

	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		/* Apply content's subtitle offsets */
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   this is equivalent to size *= max(xs, ys), so e.g. if
		   xs = ys = 0.5 the size is halved.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	/* Mark the subtitle active from its start time; the matching subtitle_stop() will end it */
	_active_texts[text->type()].add_from (wc, ps, from);
}
1064
1065 void
1066 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1067 {
1068         shared_ptr<const TextContent> text = wc.lock ();
1069         if (!text) {
1070                 return;
1071         }
1072
1073         if (!_active_texts[text->type()].have(wc)) {
1074                 return;
1075         }
1076
1077         shared_ptr<Piece> piece = wp.lock ();
1078         if (!piece) {
1079                 return;
1080         }
1081
1082         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1083
1084         if (dcp_to > piece->content->end(_film)) {
1085                 return;
1086         }
1087
1088         pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1089
1090         bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1091         if (text->use() && !always && !text->burn()) {
1092                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1093         }
1094 }
1095
/** Seek the player to a given time.
 *  @param time DCP time to seek to.
 *  @param accurate true to seek exactly to time; false to allow decoders to seek
 *  roughly (e.g. to somewhere before time), which may be faster.
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);
	LOG_DEBUG_PLAYER("Seek to %1 (%2accurate)", to_string(time), accurate ? "" : "in");

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	/* Throw away all state relating to the old position */

	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();
	}

	/* Seek each piece's decoder, or mark the piece done if it is entirely before time */
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
			   we must seek this (following) content accurately, otherwise when we come to the end of the current
			   content we may not start right at the beginning of the next, causing a gap (if the next content has
			   been trimmed to a point between keyframes, or something).
			*/
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end(_film)) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* We know exactly where emission will resume */
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	} else {
		/* We don't know; the next data to be emitted will establish these */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget the frames we were keeping around for gap-filling */
	_last_video.clear ();
}
1156
1157 void
1158 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1159 {
1160         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1161            player before the video that requires them.
1162         */
1163         _delay.push_back (make_pair (pv, time));
1164
1165         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1166                 _last_video_time = time + one_video_frame();
1167         }
1168         _last_video_eyes = increment_eyes (pv->eyes());
1169
1170         if (_delay.size() < 3) {
1171                 return;
1172         }
1173
1174         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1175         _delay.pop_front();
1176         do_emit_video (to_do.first, to_do.second);
1177 }
1178
1179 void
1180 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1181 {
1182         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1183                 for (int i = 0; i < TEXT_COUNT; ++i) {
1184                         _active_texts[i].clear_before (time);
1185                 }
1186         }
1187
1188         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1189         if (subtitles) {
1190                 pv->set_text (subtitles.get ());
1191         }
1192
1193         Video (pv, time);
1194 }
1195
1196 void
1197 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1198 {
1199         /* Log if the assert below is about to fail */
1200         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1201                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1202         }
1203
1204         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1205         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1206         Audio (data, time, _film->audio_frame_rate());
1207         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1208 }
1209
1210 void
1211 Player::fill_audio (DCPTimePeriod period)
1212 {
1213         if (period.from == period.to) {
1214                 return;
1215         }
1216
1217         DCPOMATIC_ASSERT (period.from < period.to);
1218
1219         DCPTime t = period.from;
1220         while (t < period.to) {
1221                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1222                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1223                 if (samples) {
1224                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1225                         silence->make_silent ();
1226                         emit_audio (silence, t);
1227                 }
1228                 t += block;
1229         }
1230 }
1231
/** @return The duration of one video frame at the film's video frame rate */
DCPTime
Player::one_video_frame () const
{
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}
1237
1238 pair<shared_ptr<AudioBuffers>, DCPTime>
1239 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1240 {
1241         DCPTime const discard_time = discard_to - time;
1242         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1243         Frame remaining_frames = audio->frames() - discard_frames;
1244         if (remaining_frames <= 0) {
1245                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1246         }
1247         shared_ptr<AudioBuffers> cut (new AudioBuffers(audio, remaining_frames, discard_frames));
1248         return make_pair(cut, time + discard_time);
1249 }
1250
/** Set the decode-resolution reduction to use for DCP content (or none) and
 *  re-create our pieces so that it takes effect, emitting the usual
 *  PENDING then DONE (or CANCELLED) Change signals around the work.
 */
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	/* Announce that this property is about to change */
	Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change: cancel the pending Change.  We unlock first since
			   observers of Change may call back into the player.
			*/
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		/* Re-create our pieces so that the new reduction is used */
		setup_pieces_unlocked ();
	}

	/* Lock is released (scope above) before DONE is emitted */
	Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1271
1272 optional<DCPTime>
1273 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1274 {
1275         boost::mutex::scoped_lock lm (_mutex);
1276
1277         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1278                 if (i->content == content) {
1279                         return content_time_to_dcp (i, t);
1280                 }
1281         }
1282
1283         /* We couldn't find this content; perhaps things are being changed over */
1284         return optional<DCPTime>();
1285 }
1286
1287
1288 shared_ptr<const Playlist>
1289 Player::playlist () const
1290 {
1291         return _playlist ? _playlist : _film->playlist();
1292 }
1293
1294
/** Handler for Atmos data emitted by a decoder: pass it straight on via our
 *  Atmos signal, converting the frame index to a DCP time at the film's
 *  video frame rate.
 */
void
Player::atmos (weak_ptr<Piece>, ContentAtmos data)
{
	Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
}
1300