Fix crash with bitmapped subtitles that have zero width or height.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2020 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_text.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
50 #include "shuffler.h"
51 #include "timer.h"
52 #include <dcp/reel.h>
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <dcp/reel_closed_caption_asset.h>
57 #include <boost/foreach.hpp>
58 #include <stdint.h>
59 #include <algorithm>
60 #include <iostream>
61
62 #include "i18n.h"
63
64 using std::list;
65 using std::cout;
66 using std::min;
67 using std::max;
68 using std::min;
69 using std::vector;
70 using std::pair;
71 using std::map;
72 using std::make_pair;
73 using std::copy;
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79 using namespace dcpomatic;
80
/* Property identifiers passed with the Player's Change signal so that
   listeners can tell what aspect of the player's output has changed. */
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
86
/** Construct a Player for a film and playlist.
 *  @param film Film whose settings (frame rate, size, audio setup) govern the output.
 *  @param playlist Playlist providing the content to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	/* _suspended > 0 means a content change is pending; pass()/seek() do nothing until it completes */
	, _suspended (0)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _tolerant (film->tolerant())
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor (if any) as if it had just changed */
	film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);

	/* Build the content/decoder pieces and start from the beginning */
	setup_pieces ();
	seek (DCPTime (), true);
}
114
Player::~Player ()
{
	/* _shuffler is a raw owning pointer, re-created on every setup_pieces_unlocked() */
	delete _shuffler;
}
119
/** Thread-safe wrapper: take the Player's mutex and rebuild the pieces */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
126
127 bool
128 have_video (shared_ptr<Piece> piece)
129 {
130         return piece->decoder && piece->decoder->video;
131 }
132
133 bool
134 have_audio (shared_ptr<Piece> piece)
135 {
136         return piece->decoder && piece->decoder->audio;
137 }
138
139 void
140 Player::setup_pieces_unlocked ()
141 {
142         list<shared_ptr<Piece> > old_pieces = _pieces;
143         _pieces.clear ();
144
145         delete _shuffler;
146         _shuffler = new Shuffler();
147         _shuffler->Video.connect(bind(&Player::video, this, _1, _2));
148
149         BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
150
151                 if (!i->paths_valid ()) {
152                         continue;
153                 }
154
155                 if (_ignore_video && _ignore_audio && i->text.empty()) {
156                         /* We're only interested in text and this content has none */
157                         continue;
158                 }
159
160                 shared_ptr<Decoder> old_decoder;
161                 BOOST_FOREACH (shared_ptr<Piece> j, old_pieces) {
162                         if (j->content == i) {
163                                 old_decoder = j->decoder;
164                                 break;
165                         }
166                 }
167
168                 shared_ptr<Decoder> decoder = decoder_factory (_film, i, _fast, _tolerant, old_decoder);
169                 FrameRateChange frc (_film, i);
170
171                 if (!decoder) {
172                         /* Not something that we can decode; e.g. Atmos content */
173                         continue;
174                 }
175
176                 if (decoder->video && _ignore_video) {
177                         decoder->video->set_ignore (true);
178                 }
179
180                 if (decoder->audio && _ignore_audio) {
181                         decoder->audio->set_ignore (true);
182                 }
183
184                 if (_ignore_text) {
185                         BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
186                                 i->set_ignore (true);
187                         }
188                 }
189
190                 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
191                 if (dcp) {
192                         dcp->set_decode_referenced (_play_referenced);
193                         if (_play_referenced) {
194                                 dcp->set_forced_reduction (_dcp_decode_reduction);
195                         }
196                 }
197
198                 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
199                 _pieces.push_back (piece);
200
201                 if (decoder->video) {
202                         if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
203                                 /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
204                                 decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
205                         } else {
206                                 decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
207                         }
208                 }
209
210                 if (decoder->audio) {
211                         decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
212                 }
213
214                 list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();
215
216                 while (j != decoder->text.end()) {
217                         (*j)->BitmapStart.connect (
218                                 bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
219                                 );
220                         (*j)->PlainStart.connect (
221                                 bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
222                                 );
223                         (*j)->Stop.connect (
224                                 bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
225                                 );
226
227                         ++j;
228                 }
229         }
230
231         _stream_states.clear ();
232         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
233                 if (i->content->audio) {
234                         BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
235                                 _stream_states[j] = StreamState (i, i->content->position ());
236                         }
237                 }
238         }
239
240         _black = Empty (_film, _pieces, bind(&have_video, _1));
241         _silent = Empty (_film, _pieces, bind(&have_audio, _1));
242
243         _last_video_time = DCPTime ();
244         _last_video_eyes = EYES_BOTH;
245         _last_audio_time = DCPTime ();
246
247         /* Cached value to save recalculating it on every ::pass */
248         _film_length = _film->length ();
249 }
250
251 void
252 Player::playlist_content_change (ChangeType type, int property, bool frequent)
253 {
254         if (type == CHANGE_TYPE_PENDING) {
255                 /* The player content is probably about to change, so we can't carry on
256                    until that has happened and we've rebuilt our pieces.  Stop pass()
257                    and seek() from working until then.
258                 */
259                 ++_suspended;
260         } else if (type == CHANGE_TYPE_DONE) {
261                 /* A change in our content has gone through.  Re-build our pieces. */
262                 setup_pieces ();
263                 --_suspended;
264         } else if (type == CHANGE_TYPE_CANCELLED) {
265                 --_suspended;
266         }
267
268         Change (type, property, frequent);
269 }
270
271 void
272 Player::set_video_container_size (dcp::Size s)
273 {
274         Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
275
276         {
277                 boost::mutex::scoped_lock lm (_mutex);
278
279                 if (s == _video_container_size) {
280                         lm.unlock ();
281                         Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
282                         return;
283                 }
284
285                 _video_container_size = s;
286
287                 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
288                 _black_image->make_black ();
289         }
290
291         Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
292 }
293
294 void
295 Player::playlist_change (ChangeType type)
296 {
297         if (type == CHANGE_TYPE_DONE) {
298                 setup_pieces ();
299         }
300         Change (type, PlayerProperty::PLAYLIST, false);
301 }
302
/** Handle a change to a Film property.
 *  @param type Stage of the change (pending / done / cancelled).
 *  @param p Property which changed.
 */
void
Player::film_change (ChangeType type, Film::Property p)
{
	/* Here we should notice Film properties that affect our output, and
	   alert listeners that our output now would be different to how it was
	   last time we were run.
	*/

	if (p == Film::CONTAINER) {
		Change (type, PlayerProperty::FILM_CONTAINER, false);
	} else if (p == Film::VIDEO_FRAME_RATE) {
		/* Pieces contain a FrameRateChange which contains the DCP frame rate,
		   so we need new pieces here.
		*/
		if (type == CHANGE_TYPE_DONE) {
			setup_pieces ();
		}
		Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
	} else if (p == Film::AUDIO_PROCESSOR) {
		if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
			/* Take a fresh clone of the processor at the film's audio rate */
			boost::mutex::scoped_lock lm (_mutex);
			_audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
		}
	} else if (p == Film::AUDIO_CHANNELS) {
		if (type == CHANGE_TYPE_DONE) {
			/* Merged audio buffers have the wrong channel count now; discard them */
			boost::mutex::scoped_lock lm (_mutex);
			_audio_merger.clear ();
		}
	}
}
333
334 shared_ptr<PlayerVideo>
335 Player::black_player_video_frame (Eyes eyes) const
336 {
337         return shared_ptr<PlayerVideo> (
338                 new PlayerVideo (
339                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
340                         Crop (),
341                         optional<double> (),
342                         _video_container_size,
343                         _video_container_size,
344                         eyes,
345                         PART_WHOLE,
346                         PresetColourConversion::all().front().conversion,
347                         VIDEO_RANGE_FULL,
348                         boost::weak_ptr<Content>(),
349                         boost::optional<Frame>()
350                 )
351         );
352 }
353
/** Convert a DCP time to a frame index within some piece's video content.
 *  @param piece Piece to query.
 *  @param t Time in the DCP.
 *  @return Frame index in the content, clamped to the content's (trimmed) extent.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(_film), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
370
371 DCPTime
372 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
373 {
374         /* See comment in dcp_to_content_video */
375         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
376         return d + piece->content->position();
377 }
378
379 Frame
380 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
381 {
382         DCPTime s = t - piece->content->position ();
383         s = min (piece->content->length_after_trim(_film), s);
384         /* See notes in dcp_to_content_video */
385         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
386 }
387
388 DCPTime
389 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
390 {
391         /* See comment in dcp_to_content_video */
392         return DCPTime::from_frames (f, _film->audio_frame_rate())
393                 - DCPTime (piece->content->trim_start(), piece->frc)
394                 + piece->content->position();
395 }
396
397 ContentTime
398 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
399 {
400         DCPTime s = t - piece->content->position ();
401         s = min (piece->content->length_after_trim(_film), s);
402         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
403 }
404
405 DCPTime
406 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
407 {
408         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
409 }
410
411 list<shared_ptr<Font> >
412 Player::get_subtitle_fonts ()
413 {
414         boost::mutex::scoped_lock lm (_mutex);
415
416         list<shared_ptr<Font> > fonts;
417         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
418                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
419                         /* XXX: things may go wrong if there are duplicate font IDs
420                            with different font files.
421                         */
422                         list<shared_ptr<Font> > f = j->fonts ();
423                         copy (f.begin(), f.end(), back_inserter (fonts));
424                 }
425         }
426
427         return fonts;
428 }
429
/** Set this player never to produce any video data.
 *  Requires a rebuild of the pieces so the decoders can be told to skip video.
 */
void
Player::set_ignore_video ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_video = true;
	setup_pieces_unlocked ();
}
438
/** Set this player never to produce any audio data.
 *  Requires a rebuild of the pieces so the decoders can be told to skip audio.
 */
void
Player::set_ignore_audio ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_audio = true;
	setup_pieces_unlocked ();
}
446
/** Set this player never to produce any text (subtitle/caption) data.
 *  Requires a rebuild of the pieces so the text decoders can be told to skip.
 */
void
Player::set_ignore_text ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_text = true;
	setup_pieces_unlocked ();
}
454
/** Set the player to always burn open texts into the image regardless of the content settings.
 *  No piece rebuild is needed: the flag is consulted when subtitles are rendered.
 */
void
Player::set_always_burn_open_subtitles ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_always_burn_open_subtitles = true;
}
462
/** Sets up the player to be faster, possibly at the expense of quality.
 *  Requires a rebuild so decoders are created with the fast flag.
 */
void
Player::set_fast ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_fast = true;
	setup_pieces_unlocked ();
}
471
/** Set the player to decode content from referenced DCPs rather than skipping it.
 *  Requires a rebuild so DCP decoders pick up the new setting.
 */
void
Player::set_play_referenced ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	setup_pieces_unlocked ();
}
479
480 static void
481 maybe_add_asset (list<ReferencedReelAsset>& a, shared_ptr<dcp::ReelAsset> r, Frame reel_trim_start, Frame reel_trim_end, DCPTime from, int const ffr)
482 {
483         DCPOMATIC_ASSERT (r);
484         r->set_entry_point (r->entry_point().get_value_or(0) + reel_trim_start);
485         r->set_duration (r->actual_duration() - reel_trim_start - reel_trim_end);
486         if (r->actual_duration() > 0) {
487                 a.push_back (
488                         ReferencedReelAsset(r, DCPTimePeriod(from, from + DCPTime::from_frames(r->actual_duration(), ffr)))
489                         );
490         }
491 }
492
/** @return assets from DCP content which are marked to be referenced (rather than
 *  re-encoded), together with the DCP periods they cover.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			/* Only DCP content can be referenced */
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (_film, j, false, false, shared_ptr<DCPDecoder>()));
		} catch (...) {
			/* NOTE: on any decoder failure we give up and return what we have so far
			   (not just skip this piece of content).
			*/
			return a;
		}

		DCPOMATIC_ASSERT (j->video_frame_rate ());
		double const cfr = j->video_frame_rate().get();
		Frame const trim_start = j->trim_start().frames_round (cfr);
		Frame const trim_end = j->trim_end().frames_round (cfr);
		int const ffr = _film->video_frame_rate ();

		/* position in the asset from the start */
		int64_t offset_from_start = 0;
		/* position in the asset from the end */
		int64_t offset_from_end = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
			/* Assume that main picture duration is the length of the reel */
			offset_from_end += k->main_picture()->actual_duration();
		}

		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			/* Assume that main picture duration is the length of the reel */
			int64_t const reel_duration = k->main_picture()->actual_duration();

			/* See doc/design/trim_reels.svg */
			Frame const reel_trim_start = min(reel_duration, max(int64_t(0), trim_start - offset_from_start));
			Frame const reel_trim_end =   min(reel_duration, max(int64_t(0), reel_duration - (offset_from_end - trim_end)));

			/* DCP time at which this reel starts */
			DCPTime const from = i->position() + DCPTime::from_frames (offset_from_start, _film->video_frame_rate());
			if (j->reference_video ()) {
				maybe_add_asset (a, k->main_picture(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_audio ()) {
				maybe_add_asset (a, k->main_sound(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
				maybe_add_asset (a, k->main_subtitle(), reel_trim_start, reel_trim_end, from, ffr);
			}

			if (j->reference_text (TEXT_CLOSED_CAPTION)) {
				/* There can be several closed-caption assets per reel */
				BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
					maybe_add_asset (a, l, reel_trim_start, reel_trim_end, from, ffr);
				}
			}

			offset_from_start += reel_duration;
			offset_from_end -= reel_duration;
		}
	}

	return a;
}
563
/** Run one pass of the player: make whichever of content, black filler or
 *  silence filler is farthest behind emit some data, then push out any audio
 *  which is now complete.
 *  @return true if playback has finished (there is nothing more to do).
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);
	DCPOMATIC_ASSERT (_film_length);

	if (_suspended) {
		/* We can't pass in this state */
		return false;
	}

	if (*_film_length == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		/* Where this piece's decoder currently is, in DCP time */
		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end(_film)) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What kind of thing will emit data on this pass */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* The black/silence fillers take over if they are behind all the content */
	if (!_black.done() && !_ignore_video && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && !_ignore_audio && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
	{
		earliest_content->done = earliest_content->decoder->pass ();
		shared_ptr<DCPContent> dcp = dynamic_pointer_cast<DCPContent>(earliest_content->content);
		if (dcp && !_play_referenced && dcp->reference_audio()) {
			/* We are skipping some referenced DCP audio content, so we need to update _last_audio_time
			   to `hide' the fact that no audio was emitted during the referenced DCP (though
			   we need to behave as though it was).
			*/
			_last_audio_time = dcp->end (_film);
		}
		break;
	}
	case BLACK:
		/* Emit one black frame and advance the black filler by one frame */
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   or after this silence.  Bodge the start time of the silence to fix it.
			   I think this is nothing to worry about since we will just add or
			   remove a little silence at the end of some content.
			*/
			int64_t const error = labs(period.from.get() - _last_audio_time->get());
			/* Let's not worry about less than a frame at 24fps */
			int64_t const too_much_error = DCPTime::from_frames(1, 24).get();
			if (error >= too_much_error) {
				_film->log()->log(String::compose("Silence starting before or after last audio by %1", error), LogEntry::TYPE_ERROR);
			}
			DCPOMATIC_ASSERT (error < too_much_error);
			period.from = *_last_audio_time;
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = *_film_length;
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Flush any out-of-order 3D frames held by the shuffler, and any delayed video */
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
718
719 /** @return Open subtitles for the frame at the given time, converted to images */
720 optional<PositionImage>
721 Player::open_subtitles_for_frame (DCPTime time) const
722 {
723         list<PositionImage> captions;
724         int const vfr = _film->video_frame_rate();
725
726         BOOST_FOREACH (
727                 PlayerText j,
728                 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
729                 ) {
730
731                 /* Bitmap subtitles */
732                 BOOST_FOREACH (BitmapText i, j.bitmap) {
733                         if (!i.image) {
734                                 continue;
735                         }
736
737                         /* i.image will already have been scaled to fit _video_container_size */
738                         dcp::Size scaled_size (i.rectangle.width * _video_container_size.width, i.rectangle.height * _video_container_size.height);
739
740                         captions.push_back (
741                                 PositionImage (
742                                         i.image,
743                                         Position<int> (
744                                                 lrint (_video_container_size.width * i.rectangle.x),
745                                                 lrint (_video_container_size.height * i.rectangle.y)
746                                                 )
747                                         )
748                                 );
749                 }
750
751                 /* String subtitles (rendered to an image) */
752                 if (!j.string.empty ()) {
753                         list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
754                         copy (s.begin(), s.end(), back_inserter (captions));
755                 }
756         }
757
758         if (captions.empty ()) {
759                 return optional<PositionImage> ();
760         }
761
762         return merge (captions);
763 }
764
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	/* Handle a video frame arriving from a piece's decoder: discard it if it is
	   not needed, fill any gap since the last emitted frame, then emit it
	   (possibly repeated, depending on the frame rate change).
	*/
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away (e.g. content was removed); nothing to do */
		return;
	}

	FrameRateChange frc (_film, piece->content);
	/* When skipping (content rate higher than DCP rate) drop every other frame */
	if (frc.skip && (video.frame % 2) == 1) {
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end(_film));

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());

		/* Fill if we have more than half a frame to do */
		if ((fill_to - fill_from) > one_video_frame() / 2) {
			/* Last frame we emitted for this piece, if any; we prefer to repeat
			   it rather than emitting black.
			*/
			LastVideoMap::const_iterator last = _last_video.find (wp);
			if (_film->three_d()) {
				/* In 3D we must fill eye-by-eye, stopping just before the eye
				   of the frame that we are about to emit.
				*/
				Eyes fill_to_eyes = video.eyes;
				if (fill_to_eyes == EYES_BOTH) {
					fill_to_eyes = EYES_LEFT;
				}
				if (fill_to == piece->content->end(_film)) {
					/* Don't fill after the end of the content */
					fill_to_eyes = EYES_LEFT;
				}
				DCPTime j = fill_from;
				Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
				if (eyes == EYES_BOTH) {
					eyes = EYES_LEFT;
				}
				/* Emit alternating eyes until we reach both fill_to and fill_to_eyes */
				while (j < fill_to || eyes != fill_to_eyes) {
					if (last != _last_video.end()) {
						/* Repeat the last frame we emitted, with the required eye */
						shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
						copy->set_eyes (eyes);
						emit_video (copy, j);
					} else {
						/* Nothing to repeat; emit black */
						emit_video (black_player_video_frame(eyes), j);
					}
					/* Only advance time once both eyes of a frame have been emitted */
					if (eyes == EYES_RIGHT) {
						j += one_video_frame();
					}
					eyes = increment_eyes (eyes);
				}
			} else {
				for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
					if (last != _last_video.end()) {
						emit_video (last->second, j);
					} else {
						emit_video (black_player_video_frame(EYES_BOTH), j);
					}
				}
			}
		}
	}

	/* Remember this frame so that it can be repeated to fill any later gap */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (_film, video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content->video->range(),
			piece->content,
			video.frame
			)
		);

	/* Emit the frame, repeated as required by the frame rate change, but never
	   beyond the end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end(_film)) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
865
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	/* Handle a block of audio arriving from a piece's decoder: trim it to the
	   content's period, apply gain, remap to the DCP channel layout, run any
	   audio processor and push the result to the merger.
	*/
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away; nothing to do */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	int const rfr = content->resampled_frame_rate (_film);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), rfr);

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end(_film)) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end(_film)) {
		/* The block straddles the end of the content; keep only the part before the end */
		Frame const remaining_frames = DCPTime(piece->content->end(_film) - time).frames_round(rfr);
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy so that we don't modify the decoder's buffers */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap to the DCP's channel count using the stream's mapping */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push to the merger and note how far this stream has got */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
934
935 void
936 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
937 {
938         shared_ptr<Piece> piece = wp.lock ();
939         shared_ptr<const TextContent> text = wc.lock ();
940         if (!piece || !text) {
941                 return;
942         }
943
944         /* Apply content's subtitle offsets */
945         subtitle.sub.rectangle.x += text->x_offset ();
946         subtitle.sub.rectangle.y += text->y_offset ();
947
948         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
949         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
950         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
951
952         /* Apply content's subtitle scale */
953         subtitle.sub.rectangle.width *= text->x_scale ();
954         subtitle.sub.rectangle.height *= text->y_scale ();
955
956         PlayerText ps;
957         shared_ptr<Image> image = subtitle.sub.image;
958
959         /* We will scale the subtitle up to fit _video_container_size */
960         int const width = subtitle.sub.rectangle.width * _video_container_size.width;
961         int const height = subtitle.sub.rectangle.height * _video_container_size.height;
962         if (width == 0 || height == 0) {
963                 return;
964         }
965
966         dcp::Size scaled_size (width, height);
967         ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUV_TO_RGB_REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
968         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
969
970         _active_texts[text->type()].add_from (wc, ps, from);
971 }
972
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	/* Handle the start of some string (rendered-text) subtitles: apply the content's
	   offsets and scales to each string, then add them to the active texts.
	*/
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	if (from > piece->content->end(_film)) {
		/* Starts after the content's period; ignore */
		return;
	}

	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		/* Apply the content's subtitle position offsets */
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we scale size by 2.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* Millisecond accuracy is fine for the subtitle's in time */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[text->type()].add_from (wc, ps, from);
}
1016
1017 void
1018 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
1019 {
1020         shared_ptr<const TextContent> text = wc.lock ();
1021         if (!text) {
1022                 return;
1023         }
1024
1025         if (!_active_texts[text->type()].have(wc)) {
1026                 return;
1027         }
1028
1029         shared_ptr<Piece> piece = wp.lock ();
1030         if (!piece) {
1031                 return;
1032         }
1033
1034         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1035
1036         if (dcp_to > piece->content->end(_film)) {
1037                 return;
1038         }
1039
1040         pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
1041
1042         bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1043         if (text->use() && !always && !text->burn()) {
1044                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
1045         }
1046 }
1047
1048 void
1049 Player::seek (DCPTime time, bool accurate)
1050 {
1051         boost::mutex::scoped_lock lm (_mutex);
1052
1053         if (_suspended) {
1054                 /* We can't seek in this state */
1055                 return;
1056         }
1057
1058         if (_shuffler) {
1059                 _shuffler->clear ();
1060         }
1061
1062         _delay.clear ();
1063
1064         if (_audio_processor) {
1065                 _audio_processor->flush ();
1066         }
1067
1068         _audio_merger.clear ();
1069         for (int i = 0; i < TEXT_COUNT; ++i) {
1070                 _active_texts[i].clear ();
1071         }
1072
1073         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1074                 if (time < i->content->position()) {
1075                         /* Before; seek to the start of the content.  Even if this request is for an inaccurate seek
1076                            we must seek this (following) content accurately, otherwise when we come to the end of the current
1077                            content we may not start right at the beginning of the next, causing a gap (if the next content has
1078                            been trimmed to a point between keyframes, or something).
1079                         */
1080                         i->decoder->seek (dcp_to_content_time (i, i->content->position()), true);
1081                         i->done = false;
1082                 } else if (i->content->position() <= time && time < i->content->end(_film)) {
1083                         /* During; seek to position */
1084                         i->decoder->seek (dcp_to_content_time (i, time), accurate);
1085                         i->done = false;
1086                 } else {
1087                         /* After; this piece is done */
1088                         i->done = true;
1089                 }
1090         }
1091
1092         if (accurate) {
1093                 _last_video_time = time;
1094                 _last_video_eyes = EYES_LEFT;
1095                 _last_audio_time = time;
1096         } else {
1097                 _last_video_time = optional<DCPTime>();
1098                 _last_video_eyes = optional<Eyes>();
1099                 _last_audio_time = optional<DCPTime>();
1100         }
1101
1102         _black.set_position (time);
1103         _silent.set_position (time);
1104
1105         _last_video.clear ();
1106 }
1107
1108 void
1109 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1110 {
1111         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1112            player before the video that requires them.
1113         */
1114         _delay.push_back (make_pair (pv, time));
1115
1116         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1117                 _last_video_time = time + one_video_frame();
1118         }
1119         _last_video_eyes = increment_eyes (pv->eyes());
1120
1121         if (_delay.size() < 3) {
1122                 return;
1123         }
1124
1125         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1126         _delay.pop_front();
1127         do_emit_video (to_do.first, to_do.second);
1128 }
1129
1130 void
1131 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1132 {
1133         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1134                 for (int i = 0; i < TEXT_COUNT; ++i) {
1135                         _active_texts[i].clear_before (time);
1136                 }
1137         }
1138
1139         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1140         if (subtitles) {
1141                 pv->set_text (subtitles.get ());
1142         }
1143
1144         Video (pv, time);
1145 }
1146
1147 void
1148 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1149 {
1150         /* Log if the assert below is about to fail */
1151         if (_last_audio_time && labs(time.get() - _last_audio_time->get()) > 1) {
1152                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1153         }
1154
1155         /* This audio must follow on from the previous, allowing for half a sample (at 48kHz) leeway */
1156         DCPOMATIC_ASSERT (!_last_audio_time || labs(time.get() - _last_audio_time->get()) < 2);
1157         Audio (data, time, _film->audio_frame_rate());
1158         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1159 }
1160
1161 void
1162 Player::fill_audio (DCPTimePeriod period)
1163 {
1164         if (period.from == period.to) {
1165                 return;
1166         }
1167
1168         DCPOMATIC_ASSERT (period.from < period.to);
1169
1170         DCPTime t = period.from;
1171         while (t < period.to) {
1172                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1173                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1174                 if (samples) {
1175                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1176                         silence->make_silent ();
1177                         emit_audio (silence, t);
1178                 }
1179                 t += block;
1180         }
1181 }
1182
DCPTime
Player::one_video_frame () const
{
	/* Duration of a single video frame at the DCP's video frame rate */
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}
1188
1189 pair<shared_ptr<AudioBuffers>, DCPTime>
1190 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1191 {
1192         DCPTime const discard_time = discard_to - time;
1193         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1194         Frame remaining_frames = audio->frames() - discard_frames;
1195         if (remaining_frames <= 0) {
1196                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1197         }
1198         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1199         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1200         return make_pair(cut, time + discard_time);
1201 }
1202
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	/* Change the DCP decode resolution reduction, rebuilding the pieces if it has
	   actually changed.  Change signals are emitted without _mutex held so that
	   observers can call back into the Player.
	*/
	Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change; unlock before emitting the cancellation signal */
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		setup_pieces_unlocked ();
	}

	Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1223
1224 optional<DCPTime>
1225 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1226 {
1227         boost::mutex::scoped_lock lm (_mutex);
1228
1229         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1230                 if (i->content == content) {
1231                         return content_time_to_dcp (i, t);
1232                 }
1233         }
1234
1235         /* We couldn't find this content; perhaps things are being changed over */
1236         return optional<DCPTime>();
1237 }