Protect the public API of Player with a mutex, since it may be called concurrently from multiple threads.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_text.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
50 #include "shuffler.h"
51 #include <dcp/reel.h>
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
57 #include <stdint.h>
58 #include <algorithm>
59 #include <iostream>
60
61 #include "i18n.h"
62
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
64
65 using std::list;
66 using std::cout;
67 using std::min;
68 using std::max;
69 using std::min;
70 using std::vector;
71 using std::pair;
72 using std::map;
73 using std::make_pair;
74 using std::copy;
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
80
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
86
/** Construct a Player which emits video, audio and text from @p playlist,
 *  interpreted in the context of @p film (frame rates, container size etc.).
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _have_valid_pieces (false)
	, _ignore_video (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	/* Watch for changes to the film and playlist which affect our output */
	_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
	_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
	_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor (if any) via the normal change path */
	film_changed (Film::AUDIO_PROCESSOR);

	/* Accurate seek to the start so the player is ready to pass() */
	seek (DCPTime (), true);
}
108
Player::~Player ()
{
	/* _shuffler is a raw pointer owned by this Player (created in setup_pieces) */
	delete _shuffler;
}
113
/** Rebuild _pieces (content/decoder pairs) from the current playlist,
 *  wire each decoder's output signals into this Player, and reset the
 *  playback state (black/silence filling, last emitted times).
 */
void
Player::setup_pieces ()
{
	_pieces.clear ();

	/* Fresh Shuffler; it re-orders 3D left/right-eye video which can arrive out of sequence */
	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

		if (!i->paths_valid ()) {
			/* Skip content whose files are missing */
			continue;
		}

		shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
		FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());

		if (!decoder) {
			/* Not something that we can decode; e.g. Atmos content */
			continue;
		}

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (_ignore_text) {
			BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
				i->set_ignore (true);
			}
		}

		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Connect start/stop signals for every text (subtitle/caption) decoder */
		list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1, _2)
				);

			++j;
		}
	}

	/* Remember, for each audio stream, which piece it belongs to and where it starts */
	_stream_states.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Periods with no video (_black) or no audio (_silent) which we must fill ourselves */
	_black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
	_silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));

	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();
	_have_valid_pieces = true;
}
205
206 void
207 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
208 {
209         shared_ptr<Content> c = w.lock ();
210         if (!c) {
211                 return;
212         }
213
214         if (
215                 property == ContentProperty::POSITION ||
216                 property == ContentProperty::LENGTH ||
217                 property == ContentProperty::TRIM_START ||
218                 property == ContentProperty::TRIM_END ||
219                 property == ContentProperty::PATH ||
220                 property == VideoContentProperty::FRAME_TYPE ||
221                 property == VideoContentProperty::COLOUR_CONVERSION ||
222                 property == AudioContentProperty::STREAMS ||
223                 property == DCPContentProperty::NEEDS_ASSETS ||
224                 property == DCPContentProperty::NEEDS_KDM ||
225                 property == TextContentProperty::COLOUR ||
226                 property == TextContentProperty::EFFECT ||
227                 property == TextContentProperty::EFFECT_COLOUR ||
228                 property == FFmpegContentProperty::SUBTITLE_STREAM ||
229                 property == FFmpegContentProperty::FILTERS
230                 ) {
231
232                 _have_valid_pieces = false;
233                 Changed (property, frequent);
234
235         } else if (
236                 property == TextContentProperty::LINE_SPACING ||
237                 property == TextContentProperty::OUTLINE_WIDTH ||
238                 property == TextContentProperty::Y_SCALE ||
239                 property == TextContentProperty::FADE_IN ||
240                 property == TextContentProperty::FADE_OUT ||
241                 property == ContentProperty::VIDEO_FRAME_RATE ||
242                 property == TextContentProperty::USE ||
243                 property == TextContentProperty::X_OFFSET ||
244                 property == TextContentProperty::Y_OFFSET ||
245                 property == TextContentProperty::X_SCALE ||
246                 property == TextContentProperty::FONTS ||
247                 property == TextContentProperty::TYPE ||
248                 property == VideoContentProperty::CROP ||
249                 property == VideoContentProperty::SCALE ||
250                 property == VideoContentProperty::FADE_IN ||
251                 property == VideoContentProperty::FADE_OUT
252                 ) {
253
254                 Changed (property, frequent);
255         }
256 }
257
258 void
259 Player::set_video_container_size (dcp::Size s)
260 {
261         {
262                 boost::mutex::scoped_lock lm (_mutex);
263
264                 if (s == _video_container_size) {
265                         return;
266                 }
267
268                 _video_container_size = s;
269
270                 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
271                 _black_image->make_black ();
272         }
273
274         Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
275 }
276
/** Handler for the playlist's Changed signal: content was added, removed
 *  or re-ordered, so our Pieces are out of date.
 */
void
Player::playlist_changed ()
{
	_have_valid_pieces = false;
	Changed (PlayerProperty::PLAYLIST, false);
}
283
284 void
285 Player::film_changed (Film::Property p)
286 {
287         /* Here we should notice Film properties that affect our output, and
288            alert listeners that our output now would be different to how it was
289            last time we were run.
290         */
291
292         if (p == Film::CONTAINER) {
293                 Changed (PlayerProperty::FILM_CONTAINER, false);
294         } else if (p == Film::VIDEO_FRAME_RATE) {
295                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
296                    so we need new pieces here.
297                 */
298                 _have_valid_pieces = false;
299                 Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
300         } else if (p == Film::AUDIO_PROCESSOR) {
301                 if (_film->audio_processor ()) {
302                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
303                 }
304         } else if (p == Film::AUDIO_CHANNELS) {
305                 _audio_merger.clear ();
306         }
307 }
308
309 list<PositionImage>
310 Player::transform_bitmap_texts (list<BitmapText> subs) const
311 {
312         list<PositionImage> all;
313
314         for (list<BitmapText>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
315                 if (!i->image) {
316                         continue;
317                 }
318
319                 /* We will scale the subtitle up to fit _video_container_size */
320                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
321
322                 all.push_back (
323                         PositionImage (
324                                 i->image->scale (
325                                         scaled_size,
326                                         dcp::YUV_TO_RGB_REC601,
327                                         i->image->pixel_format (),
328                                         true,
329                                         _fast
330                                         ),
331                                 Position<int> (
332                                         lrint (_video_container_size.width * i->rectangle.x),
333                                         lrint (_video_container_size.height * i->rectangle.y)
334                                         )
335                                 )
336                         );
337         }
338
339         return all;
340 }
341
/** @return a PlayerVideo which is a black frame filling the whole video
 *  container, tagged with the given @p eyes (for 3D output).
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return shared_ptr<PlayerVideo> (
		new PlayerVideo (
			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
			Crop (),
			optional<double> (),
			_video_container_size,
			_video_container_size,
			eyes,
			PART_WHOLE,
			PresetColourConversion::all().front().conversion,
			boost::weak_ptr<Content>(),
			boost::optional<Frame>()
		)
	);
}
360
361 Frame
362 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
363 {
364         DCPTime s = t - piece->content->position ();
365         s = min (piece->content->length_after_trim(), s);
366         s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
367
368         /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
369            then convert that ContentTime to frames at the content's rate.  However this fails for
370            situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
371            enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
372
373            Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
374         */
375         return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
376 }
377
378 DCPTime
379 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
380 {
381         /* See comment in dcp_to_content_video */
382         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
383         return d + piece->content->position();
384 }
385
386 Frame
387 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
388 {
389         DCPTime s = t - piece->content->position ();
390         s = min (piece->content->length_after_trim(), s);
391         /* See notes in dcp_to_content_video */
392         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
393 }
394
395 DCPTime
396 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
397 {
398         /* See comment in dcp_to_content_video */
399         return DCPTime::from_frames (f, _film->audio_frame_rate())
400                 - DCPTime (piece->content->trim_start(), piece->frc)
401                 + piece->content->position();
402 }
403
404 ContentTime
405 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
406 {
407         DCPTime s = t - piece->content->position ();
408         s = min (piece->content->length_after_trim(), s);
409         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
410 }
411
412 DCPTime
413 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
414 {
415         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
416 }
417
418 list<shared_ptr<Font> >
419 Player::get_subtitle_fonts ()
420 {
421         /* Does not require a lock on _mutex as it's only called from DCPEncoder */
422
423         if (!_have_valid_pieces) {
424                 setup_pieces ();
425         }
426
427         list<shared_ptr<Font> > fonts;
428         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
429                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
430                         /* XXX: things may go wrong if there are duplicate font IDs
431                            with different font files.
432                         */
433                         list<shared_ptr<Font> > f = j->fonts ();
434                         copy (f.begin(), f.end(), back_inserter (fonts));
435                 }
436         }
437
438         return fonts;
439 }
440
441 /** Set this player never to produce any video data */
442 void
443 Player::set_ignore_video ()
444 {
445         boost::mutex::scoped_lock lm (_mutex);
446         _ignore_video = true;
447 }
448
449 void
450 Player::set_ignore_text ()
451 {
452         boost::mutex::scoped_lock lm (_mutex);
453         _ignore_text = true;
454 }
455
456 /** Set the player to always burn open texts into the image regardless of the content settings */
457 void
458 Player::set_always_burn_open_subtitles ()
459 {
460         boost::mutex::scoped_lock lm (_mutex);
461         _always_burn_open_subtitles = true;
462 }
463
464 /** Sets up the player to be faster, possibly at the expense of quality */
465 void
466 Player::set_fast ()
467 {
468         boost::mutex::scoped_lock lm (_mutex);
469         _fast = true;
470         _have_valid_pieces = false;
471 }
472
473 void
474 Player::set_play_referenced ()
475 {
476         boost::mutex::scoped_lock lm (_mutex);
477         _play_referenced = true;
478         _have_valid_pieces = false;
479 }
480
481 list<ReferencedReelAsset>
482 Player::get_reel_assets ()
483 {
484         /* Does not require a lock on _mutex as it's only called from DCPEncoder */
485
486         list<ReferencedReelAsset> a;
487
488         BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
489                 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
490                 if (!j) {
491                         continue;
492                 }
493
494                 scoped_ptr<DCPDecoder> decoder;
495                 try {
496                         decoder.reset (new DCPDecoder (j, _film->log(), false));
497                 } catch (...) {
498                         return a;
499                 }
500
501                 int64_t offset = 0;
502                 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
503
504                         DCPOMATIC_ASSERT (j->video_frame_rate ());
505                         double const cfr = j->video_frame_rate().get();
506                         Frame const trim_start = j->trim_start().frames_round (cfr);
507                         Frame const trim_end = j->trim_end().frames_round (cfr);
508                         int const ffr = _film->video_frame_rate ();
509
510                         DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
511                         if (j->reference_video ()) {
512                                 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
513                                 DCPOMATIC_ASSERT (ra);
514                                 ra->set_entry_point (ra->entry_point() + trim_start);
515                                 ra->set_duration (ra->duration() - trim_start - trim_end);
516                                 a.push_back (
517                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
518                                         );
519                         }
520
521                         if (j->reference_audio ()) {
522                                 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
523                                 DCPOMATIC_ASSERT (ra);
524                                 ra->set_entry_point (ra->entry_point() + trim_start);
525                                 ra->set_duration (ra->duration() - trim_start - trim_end);
526                                 a.push_back (
527                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
528                                         );
529                         }
530
531                         if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
532                                 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
533                                 DCPOMATIC_ASSERT (ra);
534                                 ra->set_entry_point (ra->entry_point() + trim_start);
535                                 ra->set_duration (ra->duration() - trim_start - trim_end);
536                                 a.push_back (
537                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
538                                         );
539                         }
540
541                         if (j->reference_text (TEXT_CLOSED_CAPTION)) {
542                                 shared_ptr<dcp::ReelAsset> ra = k->closed_caption ();
543                                 DCPOMATIC_ASSERT (ra);
544                                 ra->set_entry_point (ra->entry_point() + trim_start);
545                                 ra->set_duration (ra->duration() - trim_start - trim_end);
546                                 a.push_back (
547                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
548                                         );
549                         }
550
551                         /* Assume that main picture duration is the length of the reel */
552                         offset += k->main_picture()->duration ();
553                 }
554         }
555
556         return a;
557 }
558
/** Emit some more data (video, audio, text, or gap-filling black/silence)
 *  from whichever source is farthest behind, then emit any audio which is
 *  now known to be complete.
 *  @return true if there is nothing more to emit (playback has finished).
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (!_have_valid_pieces) {
		setup_pieces ();
	}

	if (_playlist->length() == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end()) {
			/* This piece's decoder has passed the end of its content */
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What we will emit on this pass */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Black (no-video) or silent (no-audio) gaps take over if they start
	   before the earliest content.
	*/
	if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
		earliest_content->done = earliest_content->decoder->pass ();
		break;
	case BLACK:
		/* Fill one frame of black and advance the black-gap position */
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   this silence.  Bodge the start time of the silence to fix it.  I'm
			   not sure if this is the right solution --- maybe the last thing should
			   be padded `forward' rather than this thing padding `back'.
			*/
			period.from = min(period.from, *_last_audio_time);
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _film->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Playback has finished: flush the 3D shuffler and any delayed video */
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
694
695 list<PlayerText>
696 Player::closed_captions_for_frame (DCPTime time) const
697 {
698         boost::mutex::scoped_lock _lm (_mutex);
699         return _active_texts[TEXT_CLOSED_CAPTION].get (
700                 DCPTimePeriod(time, time + DCPTime::from_frames(1, _film->video_frame_rate()))
701                 );
702 }
703
704 /** @return Open subtitles for the frame at the given time, converted to images */
705 optional<PositionImage>
706 Player::open_subtitles_for_frame (DCPTime time) const
707 {
708         list<PositionImage> captions;
709         int const vfr = _film->video_frame_rate();
710
711         BOOST_FOREACH (
712                 PlayerText j,
713                 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
714                 ) {
715
716                 /* Image subtitles */
717                 list<PositionImage> c = transform_bitmap_texts (j.image);
718                 copy (c.begin(), c.end(), back_inserter (captions));
719
720                 /* Text subtitles (rendered to an image) */
721                 if (!j.text.empty ()) {
722                         list<PositionImage> s = render_text (j.text, j.fonts, _video_container_size, time, vfr);
723                         copy (s.begin(), s.end(), back_inserter (captions));
724                 }
725         }
726
727         if (captions.empty ()) {
728                 return optional<PositionImage> ();
729         }
730
731         return merge (captions);
732 }
733
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	/* Handler for video data arriving from a decoder: work out the frame's DCP
	   time, fill any gap since the last video that we emitted (using repeats of
	   the previous frame, or black), then emit this frame — possibly more than
	   once if the frame-rate change requires repeats.
	*/
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
	if (frc.skip && (video.frame % 2) == 1) {
		/* This frame-rate change drops every other (odd-numbered) frame */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end());

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());
		/* Last frame we emitted for this piece, if any; we prefer repeating it
		   to emitting black when filling.
		*/
		LastVideoMap::const_iterator last = _last_video.find (wp);
		if (_film->three_d()) {
			/* In 3D we fill eye-by-eye, starting with the eye we expect next */
			DCPTime j = fill_from;
			Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
			if (eyes == EYES_BOTH) {
				eyes = EYES_LEFT;
			}
			/* Keep going until we reach fill_to AND the eye that this new frame carries */
			while (j < fill_to || eyes != video.eyes) {
				if (last != _last_video.end()) {
					/* Re-emit the last frame with the eye we need */
					shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
					copy->set_eyes (eyes);
					emit_video (copy, j);
				} else {
					emit_video (black_player_video_frame(eyes), j);
				}
				if (eyes == EYES_RIGHT) {
					/* Time only advances once both eyes have been emitted */
					j += one_video_frame();
				}
				eyes = increment_eyes (eyes);
			}
		} else {
			for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
				if (last != _last_video.end()) {
					emit_video (last->second, j);
				} else {
					emit_video (black_player_video_frame(EYES_BOTH), j);
				}
			}
		}
	}

	/* Remember this frame so that it can be repeated to fill future gaps */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content,
			video.frame
			)
		);

	/* Emit the frame, repeated frc.repeat times, but never beyond the end of
	   the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end()) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
821
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	/* Handler for audio data arriving from a decoder: trim the block to the
	   content's period, apply gain, remap to the DCP's channel layout, run the
	   audio processor (if any) and push the result into the merger.
	*/
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end()) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end()) {
		/* The block straddles the end of the content; keep only the part inside */
		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Take a copy so that we don't modify the decoder's buffers */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap the stream's channels into the DCP's */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push into the merger, and note how far this stream has got */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
888
889 void
890 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
891 {
892         shared_ptr<Piece> piece = wp.lock ();
893         shared_ptr<const TextContent> text = wc.lock ();
894         if (!piece || !text) {
895                 return;
896         }
897
898         /* Apply content's subtitle offsets */
899         subtitle.sub.rectangle.x += text->x_offset ();
900         subtitle.sub.rectangle.y += text->y_offset ();
901
902         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
903         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
904         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
905
906         /* Apply content's subtitle scale */
907         subtitle.sub.rectangle.width *= text->x_scale ();
908         subtitle.sub.rectangle.height *= text->y_scale ();
909
910         PlayerText ps;
911         ps.image.push_back (subtitle.sub);
912         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
913
914         _active_texts[subtitle.type()].add_from (wc, ps, from);
915 }
916
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	/* Handler for the start of a string (plain-text) subtitle from a decoder:
	   apply the content's offset/scale settings to each SubtitleString and
	   record the result as an active text.
	*/
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	if (from > piece->content->end()) {
		/* Starts after the end of the content's period; ignore it */
		return;
	}

	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		/* Apply content's subtitle offsets */
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust the font size by the part of the scaling which is common to
		   both axes, i.e. max(xs, ys); e.g. if xs = ys = 0.5 the size is
		   scaled by 0.5.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.text.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[subtitle.type()].add_from (wc, ps, from);
}
960
961 void
962 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to, TextType type)
963 {
964         if (!_active_texts[type].have (wc)) {
965                 return;
966         }
967
968         shared_ptr<Piece> piece = wp.lock ();
969         shared_ptr<const TextContent> text = wc.lock ();
970         if (!piece || !text) {
971                 return;
972         }
973
974         DCPTime const dcp_to = content_time_to_dcp (piece, to);
975
976         if (dcp_to > piece->content->end()) {
977                 return;
978         }
979
980         pair<PlayerText, DCPTime> from = _active_texts[type].add_to (wc, dcp_to);
981
982         bool const always = (type == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
983         if (text->use() && !always && !text->burn()) {
984                 Text (from.first, type, DCPTimePeriod (from.second, dcp_to));
985         }
986 }
987
void
Player::seek (DCPTime time, bool accurate)
{
	/* Seek the player to `time'.  When `accurate' is true we record `time' as
	   the position that the next video/audio must start at, so that anything
	   earlier arriving from the decoders is discarded.
	*/
	boost::mutex::scoped_lock lm (_mutex);

	if (!_have_valid_pieces) {
		/* The playlist (or some relevant setting) has changed; rebuild pieces */
		setup_pieces ();
	}

	if (_shuffler) {
		_shuffler->clear ();
	}

	/* Drop any video frames that are queued awaiting emission */
	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	/* Discard pending audio and any active subtitles/captions */
	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();
	}

	/* Seek each piece's decoder, or mark the piece finished if it lies wholly
	   before `time'.
	*/
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content */
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end()) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* We know exactly where the next video/audio should start */
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	} else {
		/* We can't be sure, so don't enforce continuity after this seek */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget remembered frames; they are no longer suitable for gap-filling */
	_last_video.clear ();
}
1042
1043 void
1044 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1045 {
1046         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1047            player before the video that requires them.
1048         */
1049         _delay.push_back (make_pair (pv, time));
1050
1051         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1052                 _last_video_time = time + one_video_frame();
1053         }
1054         _last_video_eyes = increment_eyes (pv->eyes());
1055
1056         if (_delay.size() < 3) {
1057                 return;
1058         }
1059
1060         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1061         _delay.pop_front();
1062         do_emit_video (to_do.first, to_do.second);
1063 }
1064
1065 void
1066 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1067 {
1068         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1069                 for (int i = 0; i < TEXT_COUNT; ++i) {
1070                         _active_texts[i].clear_before (time);
1071                 }
1072         }
1073
1074         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1075         if (subtitles) {
1076                 pv->set_text (subtitles.get ());
1077         }
1078
1079         Video (pv, time);
1080 }
1081
1082 void
1083 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1084 {
1085         /* Log if the assert below is about to fail */
1086         if (_last_audio_time && time != *_last_audio_time) {
1087                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1088         }
1089
1090         /* This audio must follow on from the previous */
1091         DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1092         Audio (data, time);
1093         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1094 }
1095
1096 void
1097 Player::fill_audio (DCPTimePeriod period)
1098 {
1099         if (period.from == period.to) {
1100                 return;
1101         }
1102
1103         DCPOMATIC_ASSERT (period.from < period.to);
1104
1105         DCPTime t = period.from;
1106         while (t < period.to) {
1107                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1108                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1109                 if (samples) {
1110                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1111                         silence->make_silent ();
1112                         emit_audio (silence, t);
1113                 }
1114                 t += block;
1115         }
1116 }
1117
1118 DCPTime
1119 Player::one_video_frame () const
1120 {
1121         return DCPTime::from_frames (1, _film->video_frame_rate ());
1122 }
1123
1124 pair<shared_ptr<AudioBuffers>, DCPTime>
1125 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1126 {
1127         DCPTime const discard_time = discard_to - time;
1128         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1129         Frame remaining_frames = audio->frames() - discard_frames;
1130         if (remaining_frames <= 0) {
1131                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1132         }
1133         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1134         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1135         return make_pair(cut, time + discard_time);
1136 }
1137
1138 void
1139 Player::set_dcp_decode_reduction (optional<int> reduction)
1140 {
1141         {
1142                 boost::mutex::scoped_lock lm (_mutex);
1143
1144                 if (reduction == _dcp_decode_reduction) {
1145                         return;
1146                 }
1147
1148                 _dcp_decode_reduction = reduction;
1149                 _have_valid_pieces = false;
1150         }
1151
1152         Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);
1153 }
1154
1155 DCPTime
1156 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1157 {
1158         boost::mutex::scoped_lock lm (_mutex);
1159
1160         if (_have_valid_pieces) {
1161                 setup_pieces ();
1162         }
1163
1164         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1165                 if (i->content == content) {
1166                         return content_time_to_dcp (i, t);
1167                 }
1168         }
1169
1170         DCPOMATIC_ASSERT (false);
1171         return DCPTime ();
1172 }