Remove TextType from various places as (I believe) it can be inferred from the content.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_text.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
50 #include "shuffler.h"
51 #include <dcp/reel.h>
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
57 #include <stdint.h>
58 #include <algorithm>
59 #include <iostream>
60
61 #include "i18n.h"
62
/* Convenience macro: compose a message and write it to the film's log as a general entry */
#define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
64
65 using std::list;
66 using std::cout;
67 using std::min;
68 using std::max;
69 using std::min;
70 using std::vector;
71 using std::pair;
72 using std::map;
73 using std::make_pair;
74 using std::copy;
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
80
/* Property identifiers passed with the Change signal so that observers can
   tell what aspect of the player's output has changed.  The values themselves
   are arbitrary but must be distinct.
*/
int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
int const PlayerProperty::PLAYLIST = 701;
int const PlayerProperty::FILM_CONTAINER = 702;
int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
86
/** Construct a Player for a film/playlist pair: wire up change signals,
 *  build the initial set of pieces and seek to the start of the film.
 *  @param film Film that is being played.
 *  @param playlist Playlist to take content from.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _suspended (false)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	/* The butler must hear about this first, so since we are proxying this through to the butler we must
	   be first.
	*/
	_playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1), boost::signals2::at_front);
	_playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor (if any) as if its property had just changed */
	film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}
113
Player::~Player ()
{
	/* _shuffler is a raw owning pointer, recreated on every setup_pieces_unlocked();
	   deleting a null pointer here is safe.  NOTE(review): a scoped_ptr would express
	   this ownership more clearly — confirm against the header before changing.
	*/
	delete _shuffler;
}
118
/** Take the state mutex and rebuild the pieces; see setup_pieces_unlocked()
 *  for the actual work.  Callers that already hold _mutex must use the
 *  _unlocked variant directly.
 */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
125
/** Rebuild the list of Pieces (content + decoder pairs) from the playlist,
 *  connecting each decoder's output signals back into this Player, and reset
 *  the playback bookkeeping (black/silent fillers, last emitted times).
 *  Caller must hold _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_pieces.clear ();

	/* Replace the shuffler wholesale; it re-orders 3D L/R frames before they reach video() */
	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

		if (!i->paths_valid ()) {
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
		FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());

		if (!decoder) {
			/* Not something that we can decode; e.g. Atmos content */
			continue;
		}

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			/* NOTE(review): this inner `i' shadows the Content loop variable above;
			   harmless, but a different name would be clearer.
			*/
			BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
				i->set_ignore (true);
			}
		}

		/* DCP content can be played by referring to the original assets rather than decoding */
		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Route each text decoder's start/stop events to our handlers, remembering
		   which piece and which TextContent they came from (weakly, so we don't keep
		   either alive).
		*/
		list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);

			++j;
		}
	}

	/* Track, per audio stream, how far we have pushed audio into the merger */
	_stream_states.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Periods of the film with no video (to be filled with black) or no audio (silence) */
	_black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
	_silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));

	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();
	_suspended = false;
}
226
227 void
228 Player::playlist_content_change (ChangeType type, int property, bool frequent)
229 {
230         if (type == CHANGE_TYPE_PENDING) {
231                 boost::mutex::scoped_lock lm (_mutex);
232                 /* The player content is probably about to change, so we can't carry on
233                    until that has happened and we've rebuilt our pieces.  Stop pass()
234                    and seek() from working until then.
235                 */
236                 _suspended = true;
237         } else if (type == CHANGE_TYPE_DONE) {
238                 /* A change in our content has gone through.  Re-build our pieces. */
239                 setup_pieces ();
240         }
241
242         Change (type, property, frequent);
243 }
244
/** Set the size of the "container" into which output video will be scaled.
 *  Emits PENDING before the change, then DONE (or CANCELLED if the size is
 *  already what was asked for).  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* Unlock before emitting so observers can call back into us */
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Pre-build a black frame at the new size for gap filling */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	/* Scope above ensures the lock is released before this emission too */
	Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
267
268 void
269 Player::playlist_change (ChangeType type)
270 {
271         if (type == CHANGE_TYPE_DONE) {
272                 setup_pieces ();
273         }
274         Change (type, PlayerProperty::PLAYLIST, false);
275 }
276
277 void
278 Player::film_change (ChangeType type, Film::Property p)
279 {
280         /* Here we should notice Film properties that affect our output, and
281            alert listeners that our output now would be different to how it was
282            last time we were run.
283         */
284
285         if (p == Film::CONTAINER) {
286                 Change (type, PlayerProperty::FILM_CONTAINER, false);
287         } else if (p == Film::VIDEO_FRAME_RATE) {
288                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
289                    so we need new pieces here.
290                 */
291                 if (type == CHANGE_TYPE_DONE) {
292                         setup_pieces ();
293                 }
294                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
295         } else if (p == Film::AUDIO_PROCESSOR) {
296                 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
297                         boost::mutex::scoped_lock lm (_mutex);
298                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
299                 }
300         } else if (p == Film::AUDIO_CHANNELS) {
301                 if (type == CHANGE_TYPE_DONE) {
302                         boost::mutex::scoped_lock lm (_mutex);
303                         _audio_merger.clear ();
304                 }
305         }
306 }
307
308 list<PositionImage>
309 Player::transform_bitmap_texts (list<BitmapText> subs) const
310 {
311         list<PositionImage> all;
312
313         for (list<BitmapText>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
314                 if (!i->image) {
315                         continue;
316                 }
317
318                 /* We will scale the subtitle up to fit _video_container_size */
319                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
320
321                 all.push_back (
322                         PositionImage (
323                                 i->image->scale (
324                                         scaled_size,
325                                         dcp::YUV_TO_RGB_REC601,
326                                         i->image->pixel_format (),
327                                         true,
328                                         _fast
329                                         ),
330                                 Position<int> (
331                                         lrint (_video_container_size.width * i->rectangle.x),
332                                         lrint (_video_container_size.height * i->rectangle.y)
333                                         )
334                                 )
335                         );
336         }
337
338         return all;
339 }
340
/** @return A completely black PlayerVideo frame at the current container size,
 *  used to fill periods where there is no video.  @param eyes Which eye(s)
 *  the frame is for (relevant for 3D).
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return shared_ptr<PlayerVideo> (
		new PlayerVideo (
			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
			Crop (),
			optional<double> (),
			_video_container_size,
			_video_container_size,
			eyes,
			PART_WHOLE,
			PresetColourConversion::all().front().conversion,
			boost::weak_ptr<Content>(),
			boost::optional<Frame>()
		)
	);
}
359
/** Convert a DCP time to a video frame index within a piece of content.
 *  @param piece Piece in question.  @param t DCP time.
 *  @return Frame index at the content's video rate, clamped to the trimmed range.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
376
377 DCPTime
378 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
379 {
380         /* See comment in dcp_to_content_video */
381         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
382         return d + piece->content->position();
383 }
384
385 Frame
386 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
387 {
388         DCPTime s = t - piece->content->position ();
389         s = min (piece->content->length_after_trim(), s);
390         /* See notes in dcp_to_content_video */
391         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
392 }
393
394 DCPTime
395 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
396 {
397         /* See comment in dcp_to_content_video */
398         return DCPTime::from_frames (f, _film->audio_frame_rate())
399                 - DCPTime (piece->content->trim_start(), piece->frc)
400                 + piece->content->position();
401 }
402
403 ContentTime
404 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
405 {
406         DCPTime s = t - piece->content->position ();
407         s = min (piece->content->length_after_trim(), s);
408         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
409 }
410
411 DCPTime
412 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
413 {
414         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
415 }
416
417 list<shared_ptr<Font> >
418 Player::get_subtitle_fonts ()
419 {
420         boost::mutex::scoped_lock lm (_mutex);
421
422         list<shared_ptr<Font> > fonts;
423         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
424                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
425                         /* XXX: things may go wrong if there are duplicate font IDs
426                            with different font files.
427                         */
428                         list<shared_ptr<Font> > f = j->fonts ();
429                         copy (f.begin(), f.end(), back_inserter (fonts));
430                 }
431         }
432
433         return fonts;
434 }
435
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_video = true;
	/* Pieces must be rebuilt so decoders pick up the new ignore flag */
	setup_pieces_unlocked ();
}
444
/** Set this player never to produce any audio data */
void
Player::set_ignore_audio ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_audio = true;
	setup_pieces_unlocked ();
}
452
/** Set this player never to produce any text (subtitle/caption) data */
void
Player::set_ignore_text ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_text = true;
	setup_pieces_unlocked ();
}
460
/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
	boost::mutex::scoped_lock lm (_mutex);
	/* No setup_pieces() needed: this flag is only read when burning subtitles */
	_always_burn_open_subtitles = true;
}
468
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_fast = true;
	/* Decoders are created with the fast flag, so rebuild them */
	setup_pieces_unlocked ();
}
477
/** Set the player to decode and play content that would otherwise be
 *  referenced directly from an existing DCP.
 */
void
Player::set_play_referenced ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	setup_pieces_unlocked ();
}
485
486 list<ReferencedReelAsset>
487 Player::get_reel_assets ()
488 {
489         /* Does not require a lock on _mutex as it's only called from DCPEncoder */
490
491         list<ReferencedReelAsset> a;
492
493         BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
494                 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
495                 if (!j) {
496                         continue;
497                 }
498
499                 scoped_ptr<DCPDecoder> decoder;
500                 try {
501                         decoder.reset (new DCPDecoder (j, _film->log(), false));
502                 } catch (...) {
503                         return a;
504                 }
505
506                 int64_t offset = 0;
507                 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
508
509                         DCPOMATIC_ASSERT (j->video_frame_rate ());
510                         double const cfr = j->video_frame_rate().get();
511                         Frame const trim_start = j->trim_start().frames_round (cfr);
512                         Frame const trim_end = j->trim_end().frames_round (cfr);
513                         int const ffr = _film->video_frame_rate ();
514
515                         DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
516                         if (j->reference_video ()) {
517                                 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
518                                 DCPOMATIC_ASSERT (ra);
519                                 ra->set_entry_point (ra->entry_point() + trim_start);
520                                 ra->set_duration (ra->duration() - trim_start - trim_end);
521                                 a.push_back (
522                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
523                                         );
524                         }
525
526                         if (j->reference_audio ()) {
527                                 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
528                                 DCPOMATIC_ASSERT (ra);
529                                 ra->set_entry_point (ra->entry_point() + trim_start);
530                                 ra->set_duration (ra->duration() - trim_start - trim_end);
531                                 a.push_back (
532                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
533                                         );
534                         }
535
536                         if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
537                                 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
538                                 DCPOMATIC_ASSERT (ra);
539                                 ra->set_entry_point (ra->entry_point() + trim_start);
540                                 ra->set_duration (ra->duration() - trim_start - trim_end);
541                                 a.push_back (
542                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
543                                         );
544                         }
545
546                         if (j->reference_text (TEXT_CLOSED_CAPTION)) {
547                                 BOOST_FOREACH (shared_ptr<dcp::ReelClosedCaptionAsset> l, k->closed_captions()) {
548                                         DCPOMATIC_ASSERT (l);
549                                         l->set_entry_point (l->entry_point() + trim_start);
550                                         l->set_duration (l->duration() - trim_start - trim_end);
551                                         a.push_back (
552                                                 ReferencedReelAsset (l, DCPTimePeriod (from, from + DCPTime::from_frames (l->duration(), ffr)))
553                                                 );
554                                 }
555                         }
556
557                         /* Assume that main picture duration is the length of the reel */
558                         offset += k->main_picture()->duration ();
559                 }
560         }
561
562         return a;
563 }
564
/** Make the player emit some more data: one decoder pass, or a frame of
 *  black, or a period of silence — whichever is furthest behind — then
 *  push out any audio that is now complete.
 *  @return true if the film has been completely played.
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		return false;
	}

	if (_playlist->length() == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end()) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What kind of emission this pass will make */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Black or silent fillers take priority if they are further behind than the content */
	if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
		earliest_content->done = earliest_content->decoder->pass ();
		break;
	case BLACK:
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   this silence.  Bodge the start time of the silence to fix it.  I'm
			   not sure if this is the right solution --- maybe the last thing should
			   be padded `forward' rather than this thing padding `back'.
			*/
			period.from = min(period.from, *_last_audio_time);
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _film->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Flush out-of-order 3D frames and any video we delayed */
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
701
702 /** @return Open subtitles for the frame at the given time, converted to images */
703 optional<PositionImage>
704 Player::open_subtitles_for_frame (DCPTime time) const
705 {
706         list<PositionImage> captions;
707         int const vfr = _film->video_frame_rate();
708
709         BOOST_FOREACH (
710                 PlayerText j,
711                 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
712                 ) {
713
714                 /* Bitmap subtitles */
715                 list<PositionImage> c = transform_bitmap_texts (j.bitmap);
716                 copy (c.begin(), c.end(), back_inserter (captions));
717
718                 /* String subtitles (rendered to an image) */
719                 if (!j.string.empty ()) {
720                         list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
721                         copy (s.begin(), s.end(), back_inserter (captions));
722                 }
723         }
724
725         if (captions.empty ()) {
726                 return optional<PositionImage> ();
727         }
728
729         return merge (captions);
730 }
731
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	/* Handle a video frame arriving from the decoder of the piece `wp'.
	   Any gap between the last video we emitted and this frame is filled
	   (with a repeat of the previous frame, or black), then the new frame
	   is emitted, repeated as required by the frame rate conversion.
	*/
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away (e.g. content was removed); drop the frame */
		return;
	}

	FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
	if (frc.skip && (video.frame % 2) == 1) {
		/* Frame rate conversion says every other source frame is dropped */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end());

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());
		/* The last frame we emitted for this piece, if any; used to repeat rather than go black */
		LastVideoMap::const_iterator last = _last_video.find (wp);
		if (_film->three_d()) {
			/* In 3D we must fill eye-by-eye, keeping left/right alternation intact */
			Eyes fill_to_eyes = video.eyes;
			if (fill_to_eyes == EYES_BOTH) {
				fill_to_eyes = EYES_LEFT;
			}
			if (fill_to == piece->content->end()) {
				/* Don't fill after the end of the content */
				fill_to_eyes = EYES_LEFT;
			}
			DCPTime j = fill_from;
			Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
			if (eyes == EYES_BOTH) {
				eyes = EYES_LEFT;
			}
			/* Emit frames, alternating eyes, until we reach both the fill time and the required eye */
			while (j < fill_to || eyes != fill_to_eyes) {
				if (last != _last_video.end()) {
					/* Repeat the last frame with the appropriate eye */
					shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
					copy->set_eyes (eyes);
					emit_video (copy, j);
				} else {
					emit_video (black_player_video_frame(eyes), j);
				}
				if (eyes == EYES_RIGHT) {
					/* A right eye completes a frame, so time moves on */
					j += one_video_frame();
				}
				eyes = increment_eyes (eyes);
			}
		} else {
			/* 2D: one emission per frame period; repeat the last frame or emit black */
			for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
				if (last != _last_video.end()) {
					emit_video (last->second, j);
				} else {
					emit_video (black_player_video_frame(EYES_BOTH), j);
				}
			}
		}
	}

	/* Remember this frame so that we can repeat it to fill future gaps */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content,
			video.frame
			)
		);

	/* Emit the frame, repeated as the frame rate conversion requires, but
	   never beyond the end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end()) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
827
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	/* Handle a block of audio arriving from the decoder of the piece `wp':
	   trim it to the content's period, apply gain, remap channels, run any
	   audio processor and push the result into the merger.
	*/
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away; drop the audio */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		/* Starts before the content's period: discard the leading part */
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end()) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end()) {
		/* Overlaps the end of the content's period: keep only the leading part */
		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain: apply the content's gain setting, if any */

	if (content->gain() != 0) {
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap the stream's channels onto the film's channel layout */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process through the film's audio processor, if there is one */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push into the merger and note how far this stream has got */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
894
895 void
896 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
897 {
898         shared_ptr<Piece> piece = wp.lock ();
899         shared_ptr<const TextContent> text = wc.lock ();
900         if (!piece || !text) {
901                 return;
902         }
903
904         /* Apply content's subtitle offsets */
905         subtitle.sub.rectangle.x += text->x_offset ();
906         subtitle.sub.rectangle.y += text->y_offset ();
907
908         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
909         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
910         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
911
912         /* Apply content's subtitle scale */
913         subtitle.sub.rectangle.width *= text->x_scale ();
914         subtitle.sub.rectangle.height *= text->y_scale ();
915
916         PlayerText ps;
917         ps.bitmap.push_back (subtitle.sub);
918         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
919
920         _active_texts[text->type()].add_from (wc, ps, from);
921 }
922
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	/* Handle the start of a string subtitle from the decoder of `wp':
	   apply the text content's offset/scale settings to each SubtitleString
	   and record the result as active.
	*/
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
		/* Piece or text content has gone away; nothing to do */
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	if (from > piece->content->end()) {
		/* Starts after the content's period; ignore it */
		return;
	}

	/* Note: `s' is a copy, so the decoder's subtitles are not modified */
	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Fold the common part of the scaling into the font size.  The expression
		   below is equivalent to size *= max(xs, ys), since 1 / min(1/xs, 1/ys)
		   == max(xs, ys) for positive values.
		   NOTE(review): the previous comment's example ("if xs = ys = 0.5 we scale
		   size by 2") disagreed with this arithmetic (which scales by 0.5 in that
		   case) — confirm which behaviour is intended.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes, when the X/Y scales differ appreciably */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* Timecode editable-rate of 1000 gives millisecond precision */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[text->type()].add_from (wc, ps, from);
}
966
967 void
968 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to)
969 {
970         shared_ptr<const TextContent> text = wc.lock ();
971         if (!text) {
972                 return;
973         }
974
975         if (!_active_texts[text->type()].have(wc)) {
976                 return;
977         }
978
979         shared_ptr<Piece> piece = wp.lock ();
980         if (!piece) {
981                 return;
982         }
983
984         DCPTime const dcp_to = content_time_to_dcp (piece, to);
985
986         if (dcp_to > piece->content->end()) {
987                 return;
988         }
989
990         pair<PlayerText, DCPTime> from = _active_texts[text->type()].add_to (wc, dcp_to);
991
992         bool const always = (text->type() == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
993         if (text->use() && !always && !text->burn()) {
994                 Text (from.first, text->type(), DCPTimePeriod (from.second, dcp_to));
995         }
996 }
997
void
Player::seek (DCPTime time, bool accurate)
{
	/* Seek the player to `time'.  If `accurate' is true the first frame
	   emitted after the seek will be exactly at `time'; otherwise emission
	   may resume a little earlier, at whatever the decoders produce first.
	*/
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't seek in this state */
		return;
	}

	/* Discard any buffered state from before the seek */
	if (_shuffler) {
		_shuffler->clear ();
	}

	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();
	}

	/* Seek each piece's decoder, or mark the piece as finished */
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content */
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end()) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* Emission must restart exactly at `time' */
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	} else {
		/* Unknown restart point; take whatever arrives first */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget remembered frames so that gap-filling doesn't repeat pre-seek video */
	_last_video.clear ();
}
1053
1054 void
1055 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1056 {
1057         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1058            player before the video that requires them.
1059         */
1060         _delay.push_back (make_pair (pv, time));
1061
1062         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1063                 _last_video_time = time + one_video_frame();
1064         }
1065         _last_video_eyes = increment_eyes (pv->eyes());
1066
1067         if (_delay.size() < 3) {
1068                 return;
1069         }
1070
1071         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1072         _delay.pop_front();
1073         do_emit_video (to_do.first, to_do.second);
1074 }
1075
1076 void
1077 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1078 {
1079         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1080                 for (int i = 0; i < TEXT_COUNT; ++i) {
1081                         _active_texts[i].clear_before (time);
1082                 }
1083         }
1084
1085         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1086         if (subtitles) {
1087                 pv->set_text (subtitles.get ());
1088         }
1089
1090         Video (pv, time);
1091 }
1092
1093 void
1094 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1095 {
1096         /* Log if the assert below is about to fail */
1097         if (_last_audio_time && time != *_last_audio_time) {
1098                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1099         }
1100
1101         /* This audio must follow on from the previous */
1102         DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1103         Audio (data, time);
1104         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1105 }
1106
1107 void
1108 Player::fill_audio (DCPTimePeriod period)
1109 {
1110         if (period.from == period.to) {
1111                 return;
1112         }
1113
1114         DCPOMATIC_ASSERT (period.from < period.to);
1115
1116         DCPTime t = period.from;
1117         while (t < period.to) {
1118                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1119                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1120                 if (samples) {
1121                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1122                         silence->make_silent ();
1123                         emit_audio (silence, t);
1124                 }
1125                 t += block;
1126         }
1127 }
1128
DCPTime
Player::one_video_frame () const
{
	/* The DCP-time duration of a single video frame at the film's frame rate */
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}
1134
1135 pair<shared_ptr<AudioBuffers>, DCPTime>
1136 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1137 {
1138         DCPTime const discard_time = discard_to - time;
1139         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1140         Frame remaining_frames = audio->frames() - discard_frames;
1141         if (remaining_frames <= 0) {
1142                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1143         }
1144         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1145         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1146         return make_pair(cut, time + discard_time);
1147 }
1148
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	/* Set the JPEG2000 decode resolution reduction for DCP content, rebuilding
	   the pieces if it changes.  Emits Change PENDING, then DONE (or CANCELLED
	   if the value is unchanged); the Change signals are emitted without the
	   lock held.
	*/
	Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change; unlock before signalling the cancellation */
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		setup_pieces_unlocked ();
	}

	Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1169
1170 optional<DCPTime>
1171 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1172 {
1173         boost::mutex::scoped_lock lm (_mutex);
1174
1175         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1176                 if (i->content == content) {
1177                         return content_time_to_dcp (i, t);
1178                 }
1179         }
1180
1181         /* We couldn't find this content; perhaps things are being changed over */
1182         return optional<DCPTime>();
1183 }