Use more direct route to fix hangs after calling some Player methods.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_text.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
50 #include "shuffler.h"
51 #include <dcp/reel.h>
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
57 #include <stdint.h>
58 #include <algorithm>
59 #include <iostream>
60
61 #include "i18n.h"
62
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
64
65 using std::list;
66 using std::cout;
67 using std::min;
68 using std::max;
69 using std::min;
70 using std::vector;
71 using std::pair;
72 using std::map;
73 using std::make_pair;
74 using std::copy;
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
80
/* Identifiers for the Player's own properties, reported via the Changed signal
   alongside the Content/Film property identifiers.  Values are chosen not to
   clash with those other identifier ranges.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
86
/** Construct a Player for a given Film and Playlist.
 *  @param film Film to use for global parameters (frame size, rates, audio processor).
 *  @param playlist Playlist whose content we will play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _have_valid_pieces (false)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	/* Raw pointer; created properly in setup_pieces() and freed in the destructor */
	, _shuffler (0)
{
	/* Watch for changes in the film, the playlist and its content so that we can
	   invalidate our pieces and tell our own listeners.
	*/
	_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
	_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
	_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor, if any */
	film_changed (Film::AUDIO_PROCESSOR);

	/* Start from the beginning; `true' requests an accurate seek */
	seek (DCPTime (), true);
}
109
Player::~Player ()
{
	/* _shuffler is a raw owning pointer (deleted and re-created by setup_pieces()) */
	delete _shuffler;
}
114
/** Rebuild _pieces (content/decoder/frame-rate-change triples) from the playlist,
 *  wiring each decoder's output signals up to this Player, and reset playback state
 *  back to time zero.  Called whenever _have_valid_pieces is found false.
 *  NOTE(review): callers appear to either hold _mutex or run single-threaded
 *  (e.g. get_subtitle_fonts) --- confirm no unlocked multi-threaded caller exists.
 */
void
Player::setup_pieces ()
{
	_pieces.clear ();

	/* Replace the Shuffler wholesale so no stale out-of-order 3D frames survive */
	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

		if (!i->paths_valid ()) {
			/* Content files are missing; nothing we can do with this */
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
		FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());

		if (!decoder) {
			/* Not something that we can decode; e.g. Atmos content */
			continue;
		}

		/* Apply the ignore flags to the decoder's sub-decoders */
		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
				i->set_ignore (true);
			}
		}

		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		/* Connect decoder outputs to us; weak_ptr<Piece> is used so that the
		   signal connections do not keep the Piece alive.
		*/
		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1, _2)
				);

			++j;
		}
	}

	/* Record, per audio stream, which piece it belongs to and where its pushed audio ends */
	_stream_states.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Trackers for the periods not covered by video/audio content; pass() fills
	   these with black frames and silence respectively.
	*/
	_black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
	_silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));

	/* Reset emission state back to the start of the DCP */
	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();
	_have_valid_pieces = true;
}
215
/** Respond to a change in a piece of the playlist's content.
 *  @param w Content that changed.
 *  @param property Identifier of the property that changed.
 *  @param frequent true if the change is likely to be frequent (forwarded to our Changed signal).
 */
void
Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
{
	shared_ptr<Content> c = w.lock ();
	if (!c) {
		/* Content has already been destroyed */
		return;
	}

	/* First group: properties which invalidate our Pieces, so they must be rebuilt
	   before the next pass/seek.
	*/
	if (
		property == ContentProperty::POSITION ||
		property == ContentProperty::LENGTH ||
		property == ContentProperty::TRIM_START ||
		property == ContentProperty::TRIM_END ||
		property == ContentProperty::PATH ||
		property == VideoContentProperty::FRAME_TYPE ||
		property == VideoContentProperty::COLOUR_CONVERSION ||
		property == AudioContentProperty::STREAMS ||
		property == DCPContentProperty::NEEDS_ASSETS ||
		property == DCPContentProperty::NEEDS_KDM ||
		property == TextContentProperty::COLOUR ||
		property == TextContentProperty::EFFECT ||
		property == TextContentProperty::EFFECT_COLOUR ||
		property == FFmpegContentProperty::SUBTITLE_STREAM ||
		property == FFmpegContentProperty::FILTERS
		) {

		/* Flag pieces invalid under the lock, but emit Changed outside it */
		{
			boost::mutex::scoped_lock lm (_mutex);
			_have_valid_pieces = false;
		}

		Changed (property, frequent);

	} else if (
		/* Second group: properties which change our output but do not require
		   new pieces; just tell listeners.
		*/
		property == TextContentProperty::LINE_SPACING ||
		property == TextContentProperty::OUTLINE_WIDTH ||
		property == TextContentProperty::Y_SCALE ||
		property == TextContentProperty::FADE_IN ||
		property == TextContentProperty::FADE_OUT ||
		property == ContentProperty::VIDEO_FRAME_RATE ||
		property == TextContentProperty::USE ||
		property == TextContentProperty::X_OFFSET ||
		property == TextContentProperty::Y_OFFSET ||
		property == TextContentProperty::X_SCALE ||
		property == TextContentProperty::FONTS ||
		property == TextContentProperty::TYPE ||
		property == VideoContentProperty::CROP ||
		property == VideoContentProperty::SCALE ||
		property == VideoContentProperty::FADE_IN ||
		property == VideoContentProperty::FADE_OUT
		) {

		Changed (property, frequent);
	}
}
271
272 void
273 Player::set_video_container_size (dcp::Size s)
274 {
275         {
276                 boost::mutex::scoped_lock lm (_mutex);
277
278                 if (s == _video_container_size) {
279                         return;
280                 }
281
282                 _video_container_size = s;
283
284                 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
285                 _black_image->make_black ();
286         }
287
288         Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
289 }
290
291 void
292 Player::playlist_changed ()
293 {
294         {
295                 boost::mutex::scoped_lock lm (_mutex);
296                 _have_valid_pieces = false;
297         }
298
299         Changed (PlayerProperty::PLAYLIST, false);
300 }
301
302 void
303 Player::film_changed (Film::Property p)
304 {
305         /* Here we should notice Film properties that affect our output, and
306            alert listeners that our output now would be different to how it was
307            last time we were run.
308         */
309
310         if (p == Film::CONTAINER) {
311                 Changed (PlayerProperty::FILM_CONTAINER, false);
312         } else if (p == Film::VIDEO_FRAME_RATE) {
313                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
314                    so we need new pieces here.
315                 */
316                 {
317                         boost::mutex::scoped_lock lm (_mutex);
318                         _have_valid_pieces = false;
319                 }
320                 Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
321         } else if (p == Film::AUDIO_PROCESSOR) {
322                 if (_film->audio_processor ()) {
323                         boost::mutex::scoped_lock lm (_mutex);
324                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
325                 }
326         } else if (p == Film::AUDIO_CHANNELS) {
327                 boost::mutex::scoped_lock lm (_mutex);
328                 _audio_merger.clear ();
329         }
330 }
331
332 list<PositionImage>
333 Player::transform_bitmap_texts (list<BitmapText> subs) const
334 {
335         list<PositionImage> all;
336
337         for (list<BitmapText>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
338                 if (!i->image) {
339                         continue;
340                 }
341
342                 /* We will scale the subtitle up to fit _video_container_size */
343                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
344
345                 all.push_back (
346                         PositionImage (
347                                 i->image->scale (
348                                         scaled_size,
349                                         dcp::YUV_TO_RGB_REC601,
350                                         i->image->pixel_format (),
351                                         true,
352                                         _fast
353                                         ),
354                                 Position<int> (
355                                         lrint (_video_container_size.width * i->rectangle.x),
356                                         lrint (_video_container_size.height * i->rectangle.y)
357                                         )
358                                 )
359                         );
360         }
361
362         return all;
363 }
364
365 shared_ptr<PlayerVideo>
366 Player::black_player_video_frame (Eyes eyes) const
367 {
368         return shared_ptr<PlayerVideo> (
369                 new PlayerVideo (
370                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
371                         Crop (),
372                         optional<double> (),
373                         _video_container_size,
374                         _video_container_size,
375                         eyes,
376                         PART_WHOLE,
377                         PresetColourConversion::all().front().conversion,
378                         boost::weak_ptr<Content>(),
379                         boost::optional<Frame>()
380                 )
381         );
382 }
383
384 Frame
385 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
386 {
387         DCPTime s = t - piece->content->position ();
388         s = min (piece->content->length_after_trim(), s);
389         s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
390
391         /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
392            then convert that ContentTime to frames at the content's rate.  However this fails for
393            situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
394            enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
395
396            Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
397         */
398         return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
399 }
400
401 DCPTime
402 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
403 {
404         /* See comment in dcp_to_content_video */
405         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
406         return d + piece->content->position();
407 }
408
409 Frame
410 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
411 {
412         DCPTime s = t - piece->content->position ();
413         s = min (piece->content->length_after_trim(), s);
414         /* See notes in dcp_to_content_video */
415         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
416 }
417
418 DCPTime
419 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
420 {
421         /* See comment in dcp_to_content_video */
422         return DCPTime::from_frames (f, _film->audio_frame_rate())
423                 - DCPTime (piece->content->trim_start(), piece->frc)
424                 + piece->content->position();
425 }
426
427 ContentTime
428 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
429 {
430         DCPTime s = t - piece->content->position ();
431         s = min (piece->content->length_after_trim(), s);
432         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
433 }
434
435 DCPTime
436 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
437 {
438         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
439 }
440
441 list<shared_ptr<Font> >
442 Player::get_subtitle_fonts ()
443 {
444         /* Does not require a lock on _mutex as it's only called from DCPEncoder */
445
446         if (!_have_valid_pieces) {
447                 setup_pieces ();
448         }
449
450         list<shared_ptr<Font> > fonts;
451         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
452                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
453                         /* XXX: things may go wrong if there are duplicate font IDs
454                            with different font files.
455                         */
456                         list<shared_ptr<Font> > f = j->fonts ();
457                         copy (f.begin(), f.end(), back_inserter (fonts));
458                 }
459         }
460
461         return fonts;
462 }
463
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_video = true;
	/* Rebuild the pieces immediately so the decoders pick up the new flag */
	setup_pieces ();
}
472
/** Set this player never to produce any audio data */
void
Player::set_ignore_audio ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_audio = true;
	/* Rebuild the pieces immediately so the decoders pick up the new flag */
	setup_pieces ();
}
480
/** Set this player never to produce any text (subtitle/caption) data */
void
Player::set_ignore_text ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_text = true;
	/* Rebuild the pieces immediately so the decoders pick up the new flag */
	setup_pieces ();
}
488
/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
	boost::mutex::scoped_lock lm (_mutex);
	/* No setup_pieces() needed: this flag is read at emission time, not decode time */
	_always_burn_open_subtitles = true;
}
496
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_fast = true;
	/* Rebuild the pieces: _fast is passed to decoder_factory() */
	setup_pieces ();
}
505
/** Set the player to decode referenced DCP content rather than skipping it */
void
Player::set_play_referenced ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	/* Rebuild the pieces so DCP decoders get set_decode_referenced() */
	setup_pieces ();
}
513
/** @return The reel assets of DCP content in the playlist which is marked for
 *  reference (i.e. to be included in the output DCP by reference rather than
 *  re-encoded), with the DCP times at which they should appear.
 *
 *  Does not require a lock on _mutex as it's only called from DCPEncoder.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			/* Only DCP content can be referenced */
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (j, _film->log(), false));
		} catch (...) {
			/* NOTE(review): a failure to open one DCP abandons the whole scan and
			   returns what we have so far, rather than skipping just this content;
			   confirm this is intended.
			*/
			return a;
		}

		/* Offset, in DCP video frames, of the current reel from the start of this content */
		int64_t offset = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			DCPOMATIC_ASSERT (j->video_frame_rate ());
			double const cfr = j->video_frame_rate().get();
			/* Trims in frames at the content's own rate */
			Frame const trim_start = j->trim_start().frames_round (cfr);
			Frame const trim_end = j->trim_end().frames_round (cfr);
			int const ffr = _film->video_frame_rate ();

			/* DCP time at which this reel starts */
			DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());

			/* For each asset type marked for reference, trim its entry point and
			   duration and record it with its period in the output DCP.
			*/
			if (j->reference_video ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_audio ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
				shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_text (TEXT_CLOSED_CAPTION)) {
				shared_ptr<dcp::ReelAsset> ra = k->closed_caption ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			/* Assume that main picture duration is the length of the reel */
			offset += k->main_picture()->duration ();
		}
	}

	return a;
}
591
/** Ask the player to emit some more data via its signals: one chunk of content,
 *  one black frame or one period of silence, followed by any audio which is now
 *  definitely complete.
 *  @return true if there is nothing left to emit (or the film is empty).
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (!_have_valid_pieces) {
		/* This should only happen when we are under the control of the butler.  In this case, _have_valid_pieces
		   will be false if something in the Player has changed and we are waiting for the butler to notice
		   and do a seek back to the place we were at before.  During this time we don't want pass() to do anything,
		   as just after setup_pieces the new decoders will be back to time 0 until the seek has gone through.  Just do nothing
		   here and assume that the seek will be forthcoming.
		*/
		return false;
	}

	if (_playlist->length() == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		/* This piece's decoder position, in DCP time */
		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end()) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What we will emit on this pass */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* A black or silent period which starts before the earliest content takes priority */
	if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
		earliest_content->done = earliest_content->decoder->pass ();
		break;
	case BLACK:
		/* Emit one black frame and advance by one video frame */
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   this silence.  Bodge the start time of the silence to fix it.  I'm
			   not sure if this is the right solution --- maybe the last thing should
			   be padded `forward' rather than this thing padding `back'.
			*/
			period.from = min(period.from, *_last_audio_time);
		}
		/* Fill silence in chunks of at most one video frame */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _film->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Everything has finished; flush out any video still buffered in the
		   shuffler and the delay line.
		*/
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
733
734 /** @return Open subtitles for the frame at the given time, converted to images */
735 optional<PositionImage>
736 Player::open_subtitles_for_frame (DCPTime time) const
737 {
738         list<PositionImage> captions;
739         int const vfr = _film->video_frame_rate();
740
741         BOOST_FOREACH (
742                 PlayerText j,
743                 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
744                 ) {
745
746                 /* Bitmap subtitles */
747                 list<PositionImage> c = transform_bitmap_texts (j.bitmap);
748                 copy (c.begin(), c.end(), back_inserter (captions));
749
750                 /* String subtitles (rendered to an image) */
751                 if (!j.string.empty ()) {
752                         list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
753                         copy (s.begin(), s.end(), back_inserter (captions));
754                 }
755         }
756
757         if (captions.empty ()) {
758                 return optional<PositionImage> ();
759         }
760
761         return merge (captions);
762 }
763
/** Handle a video frame arriving from a decoder: discard it if it is too
 *  early, fill any gap since the last emitted frame (repeating the previous
 *  frame, or using black), then emit the new frame.
 *  @param wp Piece that the video came from.
 *  @param video Video frame.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
	/* If the rate change requires frames to be dropped, drop every other one */
	if (frc.skip && (video.frame % 2) == 1) {
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end());

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());
		/* The last frame we emitted for this piece, if any; repeated to fill
		   gaps in preference to black.
		*/
		LastVideoMap::const_iterator last = _last_video.find (wp);
		if (_film->three_d()) {
			Eyes fill_to_eyes = video.eyes;
			if (fill_to == piece->content->end()) {
				/* Don't fill after the end of the content */
				fill_to_eyes = EYES_LEFT;
			}
			DCPTime j = fill_from;
			Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
			if (eyes == EYES_BOTH) {
				eyes = EYES_LEFT;
			}
			/* Emit left/right frames until we have reached both the fill
			   time and the required eye.
			*/
			while (j < fill_to || eyes != fill_to_eyes) {
				if (last != _last_video.end()) {
					/* Repeat the last frame, adjusted for this eye */
					shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
					copy->set_eyes (eyes);
					emit_video (copy, j);
				} else {
					emit_video (black_player_video_frame(eyes), j);
				}
				if (eyes == EYES_RIGHT) {
					/* A whole frame (both eyes) has been filled; advance */
					j += one_video_frame();
				}
				eyes = increment_eyes (eyes);
			}
		} else {
			for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
				if (last != _last_video.end()) {
					emit_video (last->second, j);
				} else {
					emit_video (black_player_video_frame(EYES_BOTH), j);
				}
			}
		}
	}

	/* Remember this frame so it can be repeated if a gap is found later */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content,
			video.frame
			)
		);

	/* Emit the frame, repeating it if the frame rate change requires that,
	   but never past the end of the content.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end()) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
856
/** Handle a block of audio arriving from a decoder: trim it to the content's
 *  period, apply gain, remap and process it, then push it to the merger.
 *  @param wp Piece that the audio came from.
 *  @param stream Stream within the piece.
 *  @param content_audio Audio data and its frame position within the content.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end()) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end()) {
		/* The block overlaps the end of the content; keep only the part inside */
		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Take a copy so that we don't modify the decoder's buffers */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap to the DCP's channel layout */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push, and record how far this stream has now been pushed */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
923
924 void
925 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
926 {
927         shared_ptr<Piece> piece = wp.lock ();
928         shared_ptr<const TextContent> text = wc.lock ();
929         if (!piece || !text) {
930                 return;
931         }
932
933         /* Apply content's subtitle offsets */
934         subtitle.sub.rectangle.x += text->x_offset ();
935         subtitle.sub.rectangle.y += text->y_offset ();
936
937         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
938         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
939         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
940
941         /* Apply content's subtitle scale */
942         subtitle.sub.rectangle.width *= text->x_scale ();
943         subtitle.sub.rectangle.height *= text->y_scale ();
944
945         PlayerText ps;
946         ps.bitmap.push_back (subtitle.sub);
947         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
948
949         _active_texts[subtitle.type()].add_from (wc, ps, from);
950 }
951
952 void
953 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
954 {
955         shared_ptr<Piece> piece = wp.lock ();
956         shared_ptr<const TextContent> text = wc.lock ();
957         if (!piece || !text) {
958                 return;
959         }
960
961         PlayerText ps;
962         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
963
964         if (from > piece->content->end()) {
965                 return;
966         }
967
968         BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
969                 s.set_h_position (s.h_position() + text->x_offset ());
970                 s.set_v_position (s.v_position() + text->y_offset ());
971                 float const xs = text->x_scale();
972                 float const ys = text->y_scale();
973                 float size = s.size();
974
975                 /* Adjust size to express the common part of the scaling;
976                    e.g. if xs = ys = 0.5 we scale size by 2.
977                 */
978                 if (xs > 1e-5 && ys > 1e-5) {
979                         size *= 1 / min (1 / xs, 1 / ys);
980                 }
981                 s.set_size (size);
982
983                 /* Then express aspect ratio changes */
984                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
985                         s.set_aspect_adjust (xs / ys);
986                 }
987
988                 s.set_in (dcp::Time(from.seconds(), 1000));
989                 ps.string.push_back (StringText (s, text->outline_width()));
990                 ps.add_fonts (text->fonts ());
991         }
992
993         _active_texts[subtitle.type()].add_from (wc, ps, from);
994 }
995
996 void
997 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to, TextType type)
998 {
999         if (!_active_texts[type].have (wc)) {
1000                 return;
1001         }
1002
1003         shared_ptr<Piece> piece = wp.lock ();
1004         shared_ptr<const TextContent> text = wc.lock ();
1005         if (!piece || !text) {
1006                 return;
1007         }
1008
1009         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1010
1011         if (dcp_to > piece->content->end()) {
1012                 return;
1013         }
1014
1015         pair<PlayerText, DCPTime> from = _active_texts[type].add_to (wc, dcp_to);
1016
1017         bool const always = (type == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1018         if (text->use() && !always && !text->burn()) {
1019                 Text (from.first, type, DCPTimePeriod (from.second, dcp_to));
1020         }
1021 }
1022
1023 void
1024 Player::seek (DCPTime time, bool accurate)
1025 {
1026         boost::mutex::scoped_lock lm (_mutex);
1027
1028         if (!_have_valid_pieces) {
1029                 setup_pieces ();
1030         }
1031
1032         if (_shuffler) {
1033                 _shuffler->clear ();
1034         }
1035
1036         _delay.clear ();
1037
1038         if (_audio_processor) {
1039                 _audio_processor->flush ();
1040         }
1041
1042         _audio_merger.clear ();
1043         for (int i = 0; i < TEXT_COUNT; ++i) {
1044                 _active_texts[i].clear ();
1045         }
1046
1047         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1048                 if (time < i->content->position()) {
1049                         /* Before; seek to the start of the content */
1050                         i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
1051                         i->done = false;
1052                 } else if (i->content->position() <= time && time < i->content->end()) {
1053                         /* During; seek to position */
1054                         i->decoder->seek (dcp_to_content_time (i, time), accurate);
1055                         i->done = false;
1056                 } else {
1057                         /* After; this piece is done */
1058                         i->done = true;
1059                 }
1060         }
1061
1062         if (accurate) {
1063                 _last_video_time = time;
1064                 _last_video_eyes = EYES_LEFT;
1065                 _last_audio_time = time;
1066         } else {
1067                 _last_video_time = optional<DCPTime>();
1068                 _last_video_eyes = optional<Eyes>();
1069                 _last_audio_time = optional<DCPTime>();
1070         }
1071
1072         _black.set_position (time);
1073         _silent.set_position (time);
1074
1075         _last_video.clear ();
1076 }
1077
1078 void
1079 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1080 {
1081         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1082            player before the video that requires them.
1083         */
1084         _delay.push_back (make_pair (pv, time));
1085
1086         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1087                 _last_video_time = time + one_video_frame();
1088         }
1089         _last_video_eyes = increment_eyes (pv->eyes());
1090
1091         if (_delay.size() < 3) {
1092                 return;
1093         }
1094
1095         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1096         _delay.pop_front();
1097         do_emit_video (to_do.first, to_do.second);
1098 }
1099
1100 void
1101 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1102 {
1103         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1104                 for (int i = 0; i < TEXT_COUNT; ++i) {
1105                         _active_texts[i].clear_before (time);
1106                 }
1107         }
1108
1109         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1110         if (subtitles) {
1111                 pv->set_text (subtitles.get ());
1112         }
1113
1114         Video (pv, time);
1115 }
1116
1117 void
1118 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1119 {
1120         /* Log if the assert below is about to fail */
1121         if (_last_audio_time && time != *_last_audio_time) {
1122                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1123         }
1124
1125         /* This audio must follow on from the previous */
1126         DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1127         Audio (data, time);
1128         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1129 }
1130
1131 void
1132 Player::fill_audio (DCPTimePeriod period)
1133 {
1134         if (period.from == period.to) {
1135                 return;
1136         }
1137
1138         DCPOMATIC_ASSERT (period.from < period.to);
1139
1140         DCPTime t = period.from;
1141         while (t < period.to) {
1142                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1143                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1144                 if (samples) {
1145                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1146                         silence->make_silent ();
1147                         emit_audio (silence, t);
1148                 }
1149                 t += block;
1150         }
1151 }
1152
1153 DCPTime
1154 Player::one_video_frame () const
1155 {
1156         return DCPTime::from_frames (1, _film->video_frame_rate ());
1157 }
1158
1159 pair<shared_ptr<AudioBuffers>, DCPTime>
1160 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1161 {
1162         DCPTime const discard_time = discard_to - time;
1163         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1164         Frame remaining_frames = audio->frames() - discard_frames;
1165         if (remaining_frames <= 0) {
1166                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1167         }
1168         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1169         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1170         return make_pair(cut, time + discard_time);
1171 }
1172
1173 void
1174 Player::set_dcp_decode_reduction (optional<int> reduction)
1175 {
1176         {
1177                 boost::mutex::scoped_lock lm (_mutex);
1178
1179                 if (reduction == _dcp_decode_reduction) {
1180                         return;
1181                 }
1182
1183                 _dcp_decode_reduction = reduction;
1184                 _have_valid_pieces = false;
1185         }
1186
1187         Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);
1188 }
1189
1190 DCPTime
1191 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1192 {
1193         boost::mutex::scoped_lock lm (_mutex);
1194
1195         if (_have_valid_pieces) {
1196                 setup_pieces ();
1197         }
1198
1199         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1200                 if (i->content == content) {
1201                         return content_time_to_dcp (i, t);
1202                 }
1203         }
1204
1205         DCPOMATIC_ASSERT (false);
1206         return DCPTime ();
1207 }