Add missing handling of CHANGE_TYPE_CANCELLED, which was causing another hang.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_text.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
50 #include "shuffler.h"
51 #include <dcp/reel.h>
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
57 #include <stdint.h>
58 #include <algorithm>
59 #include <iostream>
60
61 #include "i18n.h"
62
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
64
65 using std::list;
66 using std::cout;
67 using std::min;
68 using std::max;
69 using std::min;
70 using std::vector;
71 using std::pair;
72 using std::map;
73 using std::make_pair;
74 using std::copy;
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
80
/* Property identifiers emitted with the Player's Change signal; the values are
   presumably chosen so as not to clash with other property ID ranges — TODO confirm.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
86
/** Construct a Player which produces video/audio/text from @param playlist
 *  according to the settings of @param film.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _suspended (false)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up the Film's audio processor, if any, via the normal change path */
	film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}
113
Player::~Player ()
{
	/* _shuffler is a raw pointer owned by this Player (allocated in setup_pieces_unlocked) */
	delete _shuffler;
}
118
/** Locking wrapper around setup_pieces_unlocked() */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
125
/** Rebuild _pieces (and related state) from the current playlist content.
 *  Caller must hold a lock on _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_pieces.clear ();

	/* Replace the shuffler wholesale so no stale 3D L/R state survives a rebuild */
	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

		if (!i->paths_valid ()) {
			/* Skip content whose files are missing */
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
		FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());

		if (!decoder) {
			/* Not something that we can decode; e.g. Atmos content */
			continue;
		}

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
				i->set_ignore (true);
			}
		}

		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Forward each text decoder's events to our own handlers; weak_ptrs are
		   used so the connections do not keep the piece/content alive.
		*/
		list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}
	}

	/* For each audio stream, record the piece it belongs to and its start position */
	_stream_states.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Periods with no video / no audio, which pass() will fill with black / silence */
	_black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
	_silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));

	/* Reset emission state and lift any suspension set by a pending change */
	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();
	_suspended = false;
}
226
227 void
228 Player::playlist_content_change (ChangeType type, int property, bool frequent)
229 {
230         if (type == CHANGE_TYPE_PENDING) {
231                 boost::mutex::scoped_lock lm (_mutex);
232                 /* The player content is probably about to change, so we can't carry on
233                    until that has happened and we've rebuilt our pieces.  Stop pass()
234                    and seek() from working until then.
235                 */
236                 _suspended = true;
237         } else if (type == CHANGE_TYPE_DONE) {
238                 /* A change in our content has gone through.  Re-build our pieces. */
239                 setup_pieces ();
240         } else if (type == CHANGE_TYPE_CANCELLED) {
241                 _suspended = false;
242         }
243
244         Change (type, property, frequent);
245 }
246
247 void
248 Player::set_video_container_size (dcp::Size s)
249 {
250         Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
251
252         {
253                 boost::mutex::scoped_lock lm (_mutex);
254
255                 if (s == _video_container_size) {
256                         lm.unlock ();
257                         Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
258                         return;
259                 }
260
261                 _video_container_size = s;
262
263                 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
264                 _black_image->make_black ();
265         }
266
267         Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
268 }
269
270 void
271 Player::playlist_change (ChangeType type)
272 {
273         if (type == CHANGE_TYPE_DONE) {
274                 setup_pieces ();
275         }
276         Change (type, PlayerProperty::PLAYLIST, false);
277 }
278
279 void
280 Player::film_change (ChangeType type, Film::Property p)
281 {
282         /* Here we should notice Film properties that affect our output, and
283            alert listeners that our output now would be different to how it was
284            last time we were run.
285         */
286
287         if (p == Film::CONTAINER) {
288                 Change (type, PlayerProperty::FILM_CONTAINER, false);
289         } else if (p == Film::VIDEO_FRAME_RATE) {
290                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
291                    so we need new pieces here.
292                 */
293                 if (type == CHANGE_TYPE_DONE) {
294                         setup_pieces ();
295                 }
296                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
297         } else if (p == Film::AUDIO_PROCESSOR) {
298                 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
299                         boost::mutex::scoped_lock lm (_mutex);
300                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
301                 }
302         } else if (p == Film::AUDIO_CHANNELS) {
303                 if (type == CHANGE_TYPE_DONE) {
304                         boost::mutex::scoped_lock lm (_mutex);
305                         _audio_merger.clear ();
306                 }
307         }
308 }
309
310 list<PositionImage>
311 Player::transform_bitmap_texts (list<BitmapText> subs) const
312 {
313         list<PositionImage> all;
314
315         for (list<BitmapText>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
316                 if (!i->image) {
317                         continue;
318                 }
319
320                 /* We will scale the subtitle up to fit _video_container_size */
321                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
322
323                 all.push_back (
324                         PositionImage (
325                                 i->image->scale (
326                                         scaled_size,
327                                         dcp::YUV_TO_RGB_REC601,
328                                         i->image->pixel_format (),
329                                         true,
330                                         _fast
331                                         ),
332                                 Position<int> (
333                                         lrint (_video_container_size.width * i->rectangle.x),
334                                         lrint (_video_container_size.height * i->rectangle.y)
335                                         )
336                                 )
337                         );
338         }
339
340         return all;
341 }
342
/** @return a completely black frame at the current container size for the given eyes */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return shared_ptr<PlayerVideo> (
		new PlayerVideo (
			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
			Crop (),
			optional<double> (),
			_video_container_size,
			_video_container_size,
			eyes,
			PART_WHOLE,
			PresetColourConversion::all().front().conversion,
			boost::weak_ptr<Content>(),
			boost::optional<Frame>()
		)
	);
}
361
/** @return video frame index within @param piece's content which corresponds to
 *  DCP time @param t.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	/* Time since the start of the content, clamped to its trimmed length and
	   shifted by the start trim */
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
378
379 DCPTime
380 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
381 {
382         /* See comment in dcp_to_content_video */
383         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
384         return d + piece->content->position();
385 }
386
387 Frame
388 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
389 {
390         DCPTime s = t - piece->content->position ();
391         s = min (piece->content->length_after_trim(), s);
392         /* See notes in dcp_to_content_video */
393         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
394 }
395
396 DCPTime
397 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
398 {
399         /* See comment in dcp_to_content_video */
400         return DCPTime::from_frames (f, _film->audio_frame_rate())
401                 - DCPTime (piece->content->trim_start(), piece->frc)
402                 + piece->content->position();
403 }
404
405 ContentTime
406 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
407 {
408         DCPTime s = t - piece->content->position ();
409         s = min (piece->content->length_after_trim(), s);
410         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
411 }
412
413 DCPTime
414 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
415 {
416         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
417 }
418
419 list<shared_ptr<Font> >
420 Player::get_subtitle_fonts ()
421 {
422         boost::mutex::scoped_lock lm (_mutex);
423
424         list<shared_ptr<Font> > fonts;
425         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
426                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
427                         /* XXX: things may go wrong if there are duplicate font IDs
428                            with different font files.
429                         */
430                         list<shared_ptr<Font> > f = j->fonts ();
431                         copy (f.begin(), f.end(), back_inserter (fonts));
432                 }
433         }
434
435         return fonts;
436 }
437
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_video = true;
	/* Rebuild pieces so the decoders pick up the new ignore flag */
	setup_pieces_unlocked ();
}
446
/** Set this player never to produce any audio data */
void
Player::set_ignore_audio ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_audio = true;
	/* Rebuild pieces so the decoders pick up the new ignore flag */
	setup_pieces_unlocked ();
}
454
/** Set this player never to produce any text data */
void
Player::set_ignore_text ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_text = true;
	/* Rebuild pieces so the text decoders pick up the new ignore flag */
	setup_pieces_unlocked ();
}
462
/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
	boost::mutex::scoped_lock lm (_mutex);
	/* No piece rebuild needed: this flag is only consulted when rendering subtitles */
	_always_burn_open_subtitles = true;
}
470
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_fast = true;
	/* Rebuild pieces so the decoders are created with the fast flag */
	setup_pieces_unlocked ();
}
479
/** Set the player to decode referenced DCP content rather than skipping it */
void
Player::set_play_referenced ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	/* Rebuild pieces so DCP decoders are told to decode referenced assets */
	setup_pieces_unlocked ();
}
487
/** @return details of the reel assets in any DCP content which is marked
 *  to be referenced (rather than re-encoded), trimmed and positioned to
 *  match the content's placement in the film.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			/* Only DCP content can be referenced */
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (j, _film->log(), false));
		} catch (...) {
			/* Could not read this DCP; give up and return what we have so far.
			   NOTE(review): this abandons any remaining content too — presumably
			   intentional best-effort behaviour; confirm.
			*/
			return a;
		}

		/* Offset (in frames) of the current reel from the start of this content */
		int64_t offset = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			DCPOMATIC_ASSERT (j->video_frame_rate ());
			double const cfr = j->video_frame_rate().get();
			Frame const trim_start = j->trim_start().frames_round (cfr);
			Frame const trim_end = j->trim_end().frames_round (cfr);
			int const ffr = _film->video_frame_rate ();

			/* Where this reel starts in the film */
			DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
			if (j->reference_video ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
				DCPOMATIC_ASSERT (ra);
				/* Apply the content's trims to the asset's entry point and duration */
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_audio ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
				shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_text (TEXT_CLOSED_CAPTION)) {
				BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
					DCPOMATIC_ASSERT (l);
					l->set_entry_point (l->entry_point() + trim_start);
					l->set_duration (l->duration() - trim_start - trim_end);
					a.push_back (
						ReferencedReelAsset (l, DCPTimePeriod (from, from + DCPTime::from_frames (l->duration(), ffr)))
						);
				}
			}

			/* Assume that main picture duration is the length of the reel */
			offset += k->main_picture()->duration ();
		}
	}

	return a;
}
566
/** Emit some more data (video, audio, text, or black/silence filler).
 *  @return true when everything has been emitted and we are done, otherwise false.
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		return false;
	}

	if (_playlist->length() == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end()) {
			/* This piece's decoder is past the end of its content */
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What kind of thing should emit next: real content, black filler,
	   silence filler, or nothing (we are finished).
	*/
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
		earliest_content->done = earliest_content->decoder->pass ();
		break;
	case BLACK:
		/* Emit one black frame and advance by one video frame */
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   this silence.  Bodge the start time of the silence to fix it.  I'm
			   not sure if this is the right solution --- maybe the last thing should
			   be padded `forward' rather than this thing padding `back'.
			*/
			period.from = min(period.from, *_last_audio_time);
		}
		if (period.duration() > one_video_frame()) {
			/* Emit at most one video frame's worth of silence per pass */
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _film->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Flush anything waiting in the 3D shuffler and the delay queue */
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
703
704 /** @return Open subtitles for the frame at the given time, converted to images */
705 optional<PositionImage>
706 Player::open_subtitles_for_frame (DCPTime time) const
707 {
708         list<PositionImage> captions;
709         int const vfr = _film->video_frame_rate();
710
711         BOOST_FOREACH (
712                 PlayerText j,
713                 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
714                 ) {
715
716                 /* Bitmap subtitles */
717                 list<PositionImage> c = transform_bitmap_texts (j.bitmap);
718                 copy (c.begin(), c.end(), back_inserter (captions));
719
720                 /* String subtitles (rendered to an image) */
721                 if (!j.string.empty ()) {
722                         list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
723                         copy (s.begin(), s.end(), back_inserter (captions));
724                 }
725         }
726
727         if (captions.empty ()) {
728                 return optional<PositionImage> ();
729         }
730
731         return merge (captions);
732 }
733
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away; nothing to do */
		return;
	}

	FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
	if (frc.skip && (video.frame % 2) == 1) {
		/* The rate change calls for alternate content frames to be dropped */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end());

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());
		/* Last frame this piece emitted, if any: repeated to fill rather than using black */
		LastVideoMap::const_iterator last = _last_video.find (wp);
		if (_film->three_d()) {
			/* In 3D we step eye-by-eye: work out the eye we must stop before... */
			Eyes fill_to_eyes = video.eyes;
			if (fill_to_eyes == EYES_BOTH) {
				fill_to_eyes = EYES_LEFT;
			}
			if (fill_to == piece->content->end()) {
				/* Don't fill after the end of the content */
				fill_to_eyes = EYES_LEFT;
			}
			DCPTime j = fill_from;
			/* ...and the eye to start from (after whatever was last emitted) */
			Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
			if (eyes == EYES_BOTH) {
				eyes = EYES_LEFT;
			}
			while (j < fill_to || eyes != fill_to_eyes) {
				if (last != _last_video.end()) {
					/* Repeat the piece's previous frame with the eye we need */
					shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
					copy->set_eyes (eyes);
					emit_video (copy, j);
				} else {
					/* No previous frame to repeat, so use black */
					emit_video (black_player_video_frame(eyes), j);
				}
				if (eyes == EYES_RIGHT) {
					/* Time only advances once both eyes of a frame have gone */
					j += one_video_frame();
				}
				eyes = increment_eyes (eyes);
			}
		} else {
			/* 2D: fill whole frames at a time */
			for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
				if (last != _last_video.end()) {
					emit_video (last->second, j);
				} else {
					emit_video (black_player_video_frame(EYES_BOTH), j);
				}
			}
		}
	}

	/* Remember this frame so that it can be repeated into any gap we find later */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content,
			video.frame
			)
		);

	/* Emit the frame, repeated as the frame rate change requires, but never beyond
	   the end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end()) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
829
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away; nothing to do */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end()) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end()) {
		/* Trim the part of the block which hangs over the end of the content */
		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Take a copy so that we don't modify the buffers we were given */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap the stream's channels into the DCP's */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process (e.g. through any configured audio processor) */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push onto the merger and note how far this stream has now been pushed */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
896
897 void
898 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
899 {
900         shared_ptr<Piece> piece = wp.lock ();
901         shared_ptr<const TextContent> text = wc.lock ();
902         if (!piece || !text) {
903                 return;
904         }
905
906         /* Apply content's subtitle offsets */
907         subtitle.sub.rectangle.x += text->x_offset ();
908         subtitle.sub.rectangle.y += text->y_offset ();
909
910         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
911         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
912         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
913
914         /* Apply content's subtitle scale */
915         subtitle.sub.rectangle.width *= text->x_scale ();
916         subtitle.sub.rectangle.height *= text->y_scale ();
917
918         PlayerText ps;
919         ps.bitmap.push_back (subtitle.sub);
920         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
921
922         _active_texts[text->type()].add_from (wc, ps, from);
923 }
924
925 void
926 Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
927 {
928         shared_ptr<Piece> piece = wp.lock ();
929         shared_ptr<const TextContent> text = wc.lock ();
930         if (!piece || !text) {
931                 return;
932         }
933
934         PlayerText ps;
935         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
936
937         if (from > piece->content->end()) {
938                 return;
939         }
940
941         BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
942                 s.set_h_position (s.h_position() + text->x_offset ());
943                 s.set_v_position (s.v_position() + text->y_offset ());
944                 float const xs = text->x_scale();
945                 float const ys = text->y_scale();
946                 float size = s.size();
947
948                 /* Adjust size to express the common part of the scaling;
949                    e.g. if xs = ys = 0.5 we scale size by 2.
950                 */
951                 if (xs > 1e-5 && ys > 1e-5) {
952                         size *= 1 / min (1 / xs, 1 / ys);
953                 }
954                 s.set_size (size);
955
956                 /* Then express aspect ratio changes */
957                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
958                         s.set_aspect_adjust (xs / ys);
959                 }
960
961                 s.set_in (dcp::Time(from.seconds(), 1000));
962                 ps.string.push_back (StringText (s, text->outline_width()));
963                 ps.add_fonts (text->fonts ());
964         }
965
966         _active_texts[text->type()].add_from (wc, ps, from);
967 }
968
969 void
970 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
971 {
972         shared_ptr<const TextContent> text = wc.lock ();
973         if (!text) {
974                 return;
975         }
976
977         if (!_active_texts[text->type()].have(wc)) {
978                 return;
979         }
980
981         shared_ptr<Piece> piece = wp.lock ();
982         if (!piece) {
983                 return;
984         }
985
986         DCPTime const dcp_to = content_time_to_dcp (piece, to);
987
988         if (dcp_to > piece->content->end()) {
989                 return;
990         }
991
992         pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
993
994         bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
995         if (text->use() && !always && !text->burn()) {
996                 Text (from.first, text->type(), text->dcp_track().get_value_or(DCPTextTrack()), DCPTimePeriod (from.second, dcp_to));
997         }
998 }
999
1000 void
1001 Player::seek (DCPTime time, bool accurate)
1002 {
1003         boost::mutex::scoped_lock lm (_mutex);
1004
1005         if (_suspended) {
1006                 /* We can't seek in this state */
1007                 return;
1008         }
1009
1010         if (_shuffler) {
1011                 _shuffler->clear ();
1012         }
1013
1014         _delay.clear ();
1015
1016         if (_audio_processor) {
1017                 _audio_processor->flush ();
1018         }
1019
1020         _audio_merger.clear ();
1021         for (int i = 0; i < TEXT_COUNT; ++i) {
1022                 _active_texts[i].clear ();
1023         }
1024
1025         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1026                 if (time < i->content->position()) {
1027                         /* Before; seek to the start of the content */
1028                         i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
1029                         i->done = false;
1030                 } else if (i->content->position() <= time && time < i->content->end()) {
1031                         /* During; seek to position */
1032                         i->decoder->seek (dcp_to_content_time (i, time), accurate);
1033                         i->done = false;
1034                 } else {
1035                         /* After; this piece is done */
1036                         i->done = true;
1037                 }
1038         }
1039
1040         if (accurate) {
1041                 _last_video_time = time;
1042                 _last_video_eyes = EYES_LEFT;
1043                 _last_audio_time = time;
1044         } else {
1045                 _last_video_time = optional<DCPTime>();
1046                 _last_video_eyes = optional<Eyes>();
1047                 _last_audio_time = optional<DCPTime>();
1048         }
1049
1050         _black.set_position (time);
1051         _silent.set_position (time);
1052
1053         _last_video.clear ();
1054 }
1055
1056 void
1057 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1058 {
1059         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1060            player before the video that requires them.
1061         */
1062         _delay.push_back (make_pair (pv, time));
1063
1064         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1065                 _last_video_time = time + one_video_frame();
1066         }
1067         _last_video_eyes = increment_eyes (pv->eyes());
1068
1069         if (_delay.size() < 3) {
1070                 return;
1071         }
1072
1073         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1074         _delay.pop_front();
1075         do_emit_video (to_do.first, to_do.second);
1076 }
1077
1078 void
1079 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1080 {
1081         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1082                 for (int i = 0; i < TEXT_COUNT; ++i) {
1083                         _active_texts[i].clear_before (time);
1084                 }
1085         }
1086
1087         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1088         if (subtitles) {
1089                 pv->set_text (subtitles.get ());
1090         }
1091
1092         Video (pv, time);
1093 }
1094
1095 void
1096 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1097 {
1098         /* Log if the assert below is about to fail */
1099         if (_last_audio_time && time != *_last_audio_time) {
1100                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1101         }
1102
1103         /* This audio must follow on from the previous */
1104         DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1105         Audio (data, time);
1106         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1107 }
1108
1109 void
1110 Player::fill_audio (DCPTimePeriod period)
1111 {
1112         if (period.from == period.to) {
1113                 return;
1114         }
1115
1116         DCPOMATIC_ASSERT (period.from < period.to);
1117
1118         DCPTime t = period.from;
1119         while (t < period.to) {
1120                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1121                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1122                 if (samples) {
1123                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1124                         silence->make_silent ();
1125                         emit_audio (silence, t);
1126                 }
1127                 t += block;
1128         }
1129 }
1130
/** @return The duration of one video frame at the DCP's frame rate */
DCPTime
Player::one_video_frame () const
{
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}
1136
1137 pair<shared_ptr<AudioBuffers>, DCPTime>
1138 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1139 {
1140         DCPTime const discard_time = discard_to - time;
1141         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1142         Frame remaining_frames = audio->frames() - discard_frames;
1143         if (remaining_frames <= 0) {
1144                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1145         }
1146         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1147         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1148         return make_pair(cut, time + discard_time);
1149 }
1150
1151 void
1152 Player::set_dcp_decode_reduction (optional<int> reduction)
1153 {
1154         Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);
1155
1156         {
1157                 boost::mutex::scoped_lock lm (_mutex);
1158
1159                 if (reduction == _dcp_decode_reduction) {
1160                         lm.unlock ();
1161                         Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
1162                         return;
1163                 }
1164
1165                 _dcp_decode_reduction = reduction;
1166                 setup_pieces_unlocked ();
1167         }
1168
1169         Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
1170 }
1171
1172 optional<DCPTime>
1173 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1174 {
1175         boost::mutex::scoped_lock lm (_mutex);
1176
1177         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1178                 if (i->content == content) {
1179                         return content_time_to_dcp (i, t);
1180                 }
1181         }
1182
1183         /* We couldn't find this content; perhaps things are being changed over */
1184         return optional<DCPTime>();
1185 }