Missing PENDING/CANCELLED for VIDEO_CONTAINER_SIZE. Fix 3D fill with 2D sources.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_text.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
50 #include "shuffler.h"
51 #include <dcp/reel.h>
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
57 #include <stdint.h>
58 #include <algorithm>
59 #include <iostream>
60
61 #include "i18n.h"
62
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
64
65 using std::list;
66 using std::cout;
67 using std::min;
68 using std::max;
69 using std::min;
70 using std::vector;
71 using std::pair;
72 using std::map;
73 using std::make_pair;
74 using std::copy;
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
80
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
86
/** Construct a Player and get it ready for its first pass.
 *  @param film Film to play.
 *  @param playlist Playlist supplying the content to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _suspended (false)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _ignore_text (false)
	, _always_burn_open_subtitles (false)
	, _fast (false)
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
	, _shuffler (0)
{
	/* Keep ourselves in step with changes to the film and the playlist */
	_film_changed_connection = _film->Change.connect (bind (&Player::film_change, this, _1, _2));
	_playlist_change_connection = _playlist->Change.connect (bind (&Player::playlist_change, this, _1));
	/* Only _1, _3 and _4 are bound: ContentChange's second argument is
	   deliberately skipped here — presumably the changed Content itself
	   is not needed; confirm against the ContentChange signal signature.
	*/
	_playlist_content_change_connection = _playlist->ContentChange.connect (bind(&Player::playlist_content_change, this, _1, _3, _4));
	/* Size our output to the film's frame size */
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor as if it had just been set */
	film_change (CHANGE_TYPE_DONE, Film::AUDIO_PROCESSOR);

	setup_pieces ();
	seek (DCPTime (), true);
}
110
Player::~Player ()
{
	/* _shuffler is a raw owning pointer, re-created in setup_pieces_unlocked() */
	delete _shuffler;
}
115
/** Thread-safe wrapper: take _mutex and then rebuild our Pieces */
void
Player::setup_pieces ()
{
	boost::mutex::scoped_lock lm (_mutex);
	setup_pieces_unlocked ();
}
122
/** Rebuild _pieces (and all associated decoder wiring) from the playlist.
 *  The caller must hold a lock on _mutex.
 */
void
Player::setup_pieces_unlocked ()
{
	_pieces.clear ();

	/* Re-create the Shuffler; it re-orders 3D left/right-eye video which
	   decoders may emit out of sequence, then passes it on to Player::video.
	*/
	delete _shuffler;
	_shuffler = new Shuffler();
	_shuffler->Video.connect(bind(&Player::video, this, _1, _2));

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

		if (!i->paths_valid ()) {
			/* Skip content whose files are missing */
			continue;
		}

		if (_ignore_video && _ignore_audio && i->text.empty()) {
			/* We're only interested in text and this content has none */
			continue;
		}

		shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
		FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());

		if (!decoder) {
			/* Not something that we can decode; e.g. Atmos content */
			continue;
		}

		/* Apply the player-wide ignore flags to this decoder */
		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore (true);
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore (true);
		}

		if (_ignore_text) {
			BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
				i->set_ignore (true);
			}
		}

		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp) {
			dcp->set_decode_referenced (_play_referenced);
			if (_play_referenced) {
				dcp->set_forced_reduction (_dcp_decode_reduction);
			}
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		if (decoder->video) {
			if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
				/* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
				decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
			} else {
				decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
			}
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		/* Route text start/stop events from every text decoder to us */
		list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

		while (j != decoder->text.end()) {
			(*j)->BitmapStart.connect (
				bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->PlainStart.connect (
				bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
				);
			(*j)->Stop.connect (
				bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1, _2)
				);

			++j;
		}
	}

	/* Record, for each audio stream, the piece it belongs to and its start
	   position; used by pass() to work out how much merged audio is ready.
	*/
	_stream_states.clear ();
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Periods of the film with no video / no audio; pass() fills these
	   with black frames and silence respectively.
	*/
	_black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
	_silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));

	/* Reset playback state to the start of the film */
	_last_video_time = DCPTime ();
	_last_video_eyes = EYES_BOTH;
	_last_audio_time = DCPTime ();
	_suspended = false;
}
223
224 void
225 Player::playlist_content_change (ChangeType type, int property, bool frequent)
226 {
227         if (type == CHANGE_TYPE_PENDING) {
228                 boost::mutex::scoped_lock lm (_mutex);
229                 /* The player content is probably about to change, so we can't carry on
230                    until that has happened and we've rebuilt our pieces.  Stop pass()
231                    and seek() from working until then.
232                 */
233                 _suspended = true;
234         } else if (type == CHANGE_TYPE_DONE) {
235                 /* A change in our content has gone through.  Re-build our pieces. */
236                 setup_pieces ();
237         }
238
239         Change (type, property, frequent);
240 }
241
/** Set the size of the video "container" that our output will fill.
 *  Emits Change(PENDING) first, then either Change(CANCELLED) if the size
 *  is unchanged or Change(DONE) once the new size is in place.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	Change (CHANGE_TYPE_PENDING, PlayerProperty::VIDEO_CONTAINER_SIZE, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (s == _video_container_size) {
			/* Nothing to do; release the lock before emitting the signal */
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
			return;
		}

		_video_container_size = s;

		/* Re-make the cached black frame to match the new container size */
		_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
		_black_image->make_black ();
	}

	/* Emitted outside the lock, like the other Change signals */
	Change (CHANGE_TYPE_DONE, PlayerProperty::VIDEO_CONTAINER_SIZE, false);
}
264
/** Called when the playlist itself changes (e.g. content added or removed) */
void
Player::playlist_change (ChangeType type)
{
	if (type == CHANGE_TYPE_DONE) {
		/* The change has happened, so rebuild pieces to match the new playlist */
		setup_pieces ();
	}
	/* Forward the change (whatever its type) to our own observers */
	Change (type, PlayerProperty::PLAYLIST, false);
}
273
274 void
275 Player::film_change (ChangeType type, Film::Property p)
276 {
277         /* Here we should notice Film properties that affect our output, and
278            alert listeners that our output now would be different to how it was
279            last time we were run.
280         */
281
282         if (p == Film::CONTAINER) {
283                 Change (type, PlayerProperty::FILM_CONTAINER, false);
284         } else if (p == Film::VIDEO_FRAME_RATE) {
285                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
286                    so we need new pieces here.
287                 */
288                 if (type == CHANGE_TYPE_DONE) {
289                         setup_pieces ();
290                 }
291                 Change (type, PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
292         } else if (p == Film::AUDIO_PROCESSOR) {
293                 if (type == CHANGE_TYPE_DONE && _film->audio_processor ()) {
294                         boost::mutex::scoped_lock lm (_mutex);
295                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
296                 }
297         } else if (p == Film::AUDIO_CHANNELS) {
298                 if (type == CHANGE_TYPE_DONE) {
299                         boost::mutex::scoped_lock lm (_mutex);
300                         _audio_merger.clear ();
301                 }
302         }
303 }
304
305 list<PositionImage>
306 Player::transform_bitmap_texts (list<BitmapText> subs) const
307 {
308         list<PositionImage> all;
309
310         for (list<BitmapText>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
311                 if (!i->image) {
312                         continue;
313                 }
314
315                 /* We will scale the subtitle up to fit _video_container_size */
316                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
317
318                 all.push_back (
319                         PositionImage (
320                                 i->image->scale (
321                                         scaled_size,
322                                         dcp::YUV_TO_RGB_REC601,
323                                         i->image->pixel_format (),
324                                         true,
325                                         _fast
326                                         ),
327                                 Position<int> (
328                                         lrint (_video_container_size.width * i->rectangle.x),
329                                         lrint (_video_container_size.height * i->rectangle.y)
330                                         )
331                                 )
332                         );
333         }
334
335         return all;
336 }
337
/** @return A black PlayerVideo frame of _video_container_size for the given eye(s) */
shared_ptr<PlayerVideo>
Player::black_player_video_frame (Eyes eyes) const
{
	return shared_ptr<PlayerVideo> (
		new PlayerVideo (
			/* Wrap the cached black image made in set_video_container_size() */
			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
			Crop (),
			optional<double> (),
			_video_container_size,
			_video_container_size,
			eyes,
			PART_WHOLE,
			PresetColourConversion::all().front().conversion,
			/* No originating content / frame index for synthetic black */
			boost::weak_ptr<Content>(),
			boost::optional<Frame>()
		)
	);
}
356
/** Convert a DCP time to a video frame index within a piece of content.
 *  @param piece Piece to look at.
 *  @param t DCP time.
 *  @return Frame index, accounting for position, trim and skip/repeat.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	/* Time relative to the start of the piece, clamped to its trimmed length */
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
373
374 DCPTime
375 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
376 {
377         /* See comment in dcp_to_content_video */
378         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
379         return d + piece->content->position();
380 }
381
/** Convert a DCP time to an audio frame count at the film's audio rate,
 *  relative to the start of the piece's untrimmed content.
 */
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
	/* Time relative to the start of the piece, clamped to its trimmed length */
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
}
390
391 DCPTime
392 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
393 {
394         /* See comment in dcp_to_content_video */
395         return DCPTime::from_frames (f, _film->audio_frame_rate())
396                 - DCPTime (piece->content->trim_start(), piece->frc)
397                 + piece->content->position();
398 }
399
/** Convert a DCP time to a ContentTime within a piece, accounting for the
 *  piece's position and start trim; the result is clamped to be non-negative.
 */
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
	/* Time relative to the start of the piece, clamped to its trimmed length */
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
407
408 DCPTime
409 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
410 {
411         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
412 }
413
414 list<shared_ptr<Font> >
415 Player::get_subtitle_fonts ()
416 {
417         boost::mutex::scoped_lock lm (_mutex);
418
419         list<shared_ptr<Font> > fonts;
420         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
421                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
422                         /* XXX: things may go wrong if there are duplicate font IDs
423                            with different font files.
424                         */
425                         list<shared_ptr<Font> > f = j->fonts ();
426                         copy (f.begin(), f.end(), back_inserter (fonts));
427                 }
428         }
429
430         return fonts;
431 }
432
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_video = true;
	/* Pieces are wired differently when video is ignored, so rebuild them */
	setup_pieces_unlocked ();
}
441
/** Set this player never to produce any audio data */
void
Player::set_ignore_audio ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_audio = true;
	/* Pieces are wired differently when audio is ignored, so rebuild them */
	setup_pieces_unlocked ();
}
449
/** Set this player never to produce any text (subtitle/caption) data */
void
Player::set_ignore_text ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_ignore_text = true;
	/* Text decoders are told to ignore at setup time, so rebuild pieces */
	setup_pieces_unlocked ();
}
457
/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
	boost::mutex::scoped_lock lm (_mutex);
	/* Consulted at render time (open_subtitles_for_frame), so no piece rebuild needed */
	_always_burn_open_subtitles = true;
}
465
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_fast = true;
	/* _fast is passed to decoder_factory at setup time, so rebuild pieces */
	setup_pieces_unlocked ();
}
474
/** Set the player to decode content that would normally be referenced
 *  (rather than re-encoded) from an existing DCP.
 */
void
Player::set_play_referenced ()
{
	boost::mutex::scoped_lock lm (_mutex);
	_play_referenced = true;
	/* DCP decoders are configured from this flag at setup time, so rebuild */
	setup_pieces_unlocked ();
}
482
/** @return Reel assets from any DCP content which is marked to be referenced
 *  (rather than re-encoded) in the output, each with the DCP period it covers.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	/* Does not require a lock on _mutex as it's only called from DCPEncoder */

	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			/* Only DCP content can be referenced */
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (j, _film->log(), false));
		} catch (...) {
			/* Could not examine this DCP.  NOTE(review): the error is
			   swallowed and a possibly-truncated list is returned --
			   confirm callers are happy with that.
			*/
			return a;
		}

		int64_t offset = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			DCPOMATIC_ASSERT (j->video_frame_rate ());
			/* Trims are expressed in frames at the content's rate (cfr);
			   periods are built at the film's rate (ffr).
			*/
			double const cfr = j->video_frame_rate().get();
			Frame const trim_start = j->trim_start().frames_round (cfr);
			Frame const trim_end = j->trim_end().frames_round (cfr);
			int const ffr = _film->video_frame_rate ();

			/* Start of this reel in the DCP */
			DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
			if (j->reference_video ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
				DCPOMATIC_ASSERT (ra);
				/* Apply our trims to the referenced asset */
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_audio ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
				shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_text (TEXT_CLOSED_CAPTION)) {
				shared_ptr<dcp::ReelAsset> ra = k->closed_caption ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			/* Assume that main picture duration is the length of the reel */
			offset += k->main_picture()->duration ();
		}
	}

	return a;
}
560
/** Try to emit a little more data (video, audio and/or text) from whichever
 *  piece or gap is furthest behind, then push out any audio that is ready.
 *  @return true when there is nothing left to do.
 */
bool
Player::pass ()
{
	boost::mutex::scoped_lock lm (_mutex);

	if (_suspended) {
		/* We can't pass in this state */
		return false;
	}

	if (_playlist->length() == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->done) {
			continue;
		}

		/* Where this piece's decoder currently is, in DCP time */
		DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
		if (t > i->content->end()) {
			i->done = true;
		} else {

			/* Given two choices at the same time, pick the one with texts so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What kind of thing should be emitted next */
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* A black or silent gap that starts before the earliest content takes priority */
	if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
		earliest_content->done = earliest_content->decoder->pass ();
		break;
	case BLACK:
		/* Emit one black frame and advance the black period */
		emit_video (black_player_video_frame(EYES_BOTH), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		DCPTimePeriod period (_silent.period_at_position());
		if (_last_audio_time) {
			/* Sometimes the thing that happened last finishes fractionally before
			   this silence.  Bodge the start time of the silence to fix it.  I'm
			   not sure if this is the right solution --- maybe the last thing should
			   be padded `forward' rather than this thing padding `back'.
			*/
			period.from = min(period.from, *_last_audio_time);
		}
		/* Emit at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
	   of our streams, or the position of the _silent.
	*/
	DCPTime pull_to = _film->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}
	if (!_silent.done() && _silent.position() < pull_to) {
		pull_to = _silent.position();
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* This new data comes before the last we emitted (or the last seek); discard it */
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		} else if (_last_audio_time && i->second > *_last_audio_time) {
			/* There's a gap between this data and the last we emitted; fill with silence */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	if (done) {
		/* Flush out anything still buffered in the shuffler and the delay line */
		_shuffler->flush ();
		for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
			do_emit_video(i->first, i->second);
		}
	}

	return done;
}
697
698 /** @return Open subtitles for the frame at the given time, converted to images */
699 optional<PositionImage>
700 Player::open_subtitles_for_frame (DCPTime time) const
701 {
702         list<PositionImage> captions;
703         int const vfr = _film->video_frame_rate();
704
705         BOOST_FOREACH (
706                 PlayerText j,
707                 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
708                 ) {
709
710                 /* Bitmap subtitles */
711                 list<PositionImage> c = transform_bitmap_texts (j.bitmap);
712                 copy (c.begin(), c.end(), back_inserter (captions));
713
714                 /* String subtitles (rendered to an image) */
715                 if (!j.string.empty ()) {
716                         list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
717                         copy (s.begin(), s.end(), back_inserter (captions));
718                 }
719         }
720
721         if (captions.empty ()) {
722                 return optional<PositionImage> ();
723         }
724
725         return merge (captions);
726 }
727
/** Cope with a video frame arriving from a decoder: discard it if it is out of
 *  range, fill any gap since the last video we emitted, and then emit this
 *  frame (perhaps several times, depending on the frame rate change).
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away; nothing to do */
		return;
	}

	/* When skipping (content frame rate higher than the film's) drop every other content frame */
	FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
	if (frc.skip && (video.frame % 2) == 1) {
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end());

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());
		/* The last frame we emitted for this piece, if any; repeated to fill gaps */
		LastVideoMap::const_iterator last = _last_video.find (wp);
		if (_film->three_d()) {
			/* In 3D we must finish the fill on the correct eye.  A 2D source
			   frame (EYES_BOTH) counts as a left eye for this purpose.
			*/
			Eyes fill_to_eyes = video.eyes;
			if (fill_to_eyes == EYES_BOTH) {
				fill_to_eyes = EYES_LEFT;
			}
			if (fill_to == piece->content->end()) {
				/* Don't fill after the end of the content */
				fill_to_eyes = EYES_LEFT;
			}
			DCPTime j = fill_from;
			Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
			if (eyes == EYES_BOTH) {
				eyes = EYES_LEFT;
			}
			/* Loop until we have reached both the fill time and the required eye */
			while (j < fill_to || eyes != fill_to_eyes) {
				if (last != _last_video.end()) {
					/* Repeat the last frame, stamped with the eye we need */
					shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
					copy->set_eyes (eyes);
					emit_video (copy, j);
				} else {
					emit_video (black_player_video_frame(eyes), j);
				}
				if (eyes == EYES_RIGHT) {
					/* Only advance time once both eyes have been emitted */
					j += one_video_frame();
				}
				eyes = increment_eyes (eyes);
			}
		} else {
			/* 2D: repeat the last frame (or black) once per video frame */
			for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
				if (last != _last_video.end()) {
					emit_video (last->second, j);
				} else {
					emit_video (black_player_video_frame(EYES_BOTH), j);
				}
			}
		}
	}

	/* Remember this frame so that it can be repeated to fill future gaps */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content,
			video.frame
			)
		);

	/* Emit the frame, repeating it if the frame rate change requires it,
	   but never beyond the end of the content.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end()) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
823
824 void
825 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
826 {
827         DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
828
829         shared_ptr<Piece> piece = wp.lock ();
830         if (!piece) {
831                 return;
832         }
833
834         shared_ptr<AudioContent> content = piece->content->audio;
835         DCPOMATIC_ASSERT (content);
836
837         /* Compute time in the DCP */
838         DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
839         /* And the end of this block in the DCP */
840         DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
841
842         /* Remove anything that comes before the start or after the end of the content */
843         if (time < piece->content->position()) {
844                 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
845                 if (!cut.first) {
846                         /* This audio is entirely discarded */
847                         return;
848                 }
849                 content_audio.audio = cut.first;
850                 time = cut.second;
851         } else if (time > piece->content->end()) {
852                 /* Discard it all */
853                 return;
854         } else if (end > piece->content->end()) {
855                 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
856                 if (remaining_frames == 0) {
857                         return;
858                 }
859                 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
860                 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
861                 content_audio.audio = cut;
862         }
863
864         DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
865
866         /* Gain */
867
868         if (content->gain() != 0) {
869                 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
870                 gain->apply_gain (content->gain ());
871                 content_audio.audio = gain;
872         }
873
874         /* Remap */
875
876         content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
877
878         /* Process */
879
880         if (_audio_processor) {
881                 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
882         }
883
884         /* Push */
885
886         _audio_merger.push (content_audio.audio, time);
887         DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
888         _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
889 }
890
891 void
892 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
893 {
894         shared_ptr<Piece> piece = wp.lock ();
895         shared_ptr<const TextContent> text = wc.lock ();
896         if (!piece || !text) {
897                 return;
898         }
899
900         /* Apply content's subtitle offsets */
901         subtitle.sub.rectangle.x += text->x_offset ();
902         subtitle.sub.rectangle.y += text->y_offset ();
903
904         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
905         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
906         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
907
908         /* Apply content's subtitle scale */
909         subtitle.sub.rectangle.width *= text->x_scale ();
910         subtitle.sub.rectangle.height *= text->y_scale ();
911
912         PlayerText ps;
913         ps.bitmap.push_back (subtitle.sub);
914         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
915
916         _active_texts[subtitle.type()].add_from (wc, ps, from);
917 }
918
/** Cope with a string subtitle arriving from a decoder: apply the content's
 *  offset and scale settings to each string and store the result as an
 *  active text, to be emitted when its stop time is known.
 */
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	if (from > piece->content->end()) {
		/* Starts after the content has finished; ignore it */
		return;
	}

	/* `s' is a copy, so the original subtitle data is not modified */
	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we scale size by 2.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* The subtitle's start time in the DCP */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[subtitle.type()].add_from (wc, ps, from);
}
962
963 void
964 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to, TextType type)
965 {
966         if (!_active_texts[type].have (wc)) {
967                 return;
968         }
969
970         shared_ptr<Piece> piece = wp.lock ();
971         shared_ptr<const TextContent> text = wc.lock ();
972         if (!piece || !text) {
973                 return;
974         }
975
976         DCPTime const dcp_to = content_time_to_dcp (piece, to);
977
978         if (dcp_to > piece->content->end()) {
979                 return;
980         }
981
982         pair<PlayerText, DCPTime> from = _active_texts[type].add_to (wc, dcp_to);
983
984         bool const always = (type == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
985         if (text->use() && !always && !text->burn()) {
986                 Text (from.first, type, DCPTimePeriod (from.second, dcp_to));
987         }
988 }
989
990 void
991 Player::seek (DCPTime time, bool accurate)
992 {
993         boost::mutex::scoped_lock lm (_mutex);
994
995         if (_suspended) {
996                 /* We can't seek in this state */
997                 return;
998         }
999
1000         if (_shuffler) {
1001                 _shuffler->clear ();
1002         }
1003
1004         _delay.clear ();
1005
1006         if (_audio_processor) {
1007                 _audio_processor->flush ();
1008         }
1009
1010         _audio_merger.clear ();
1011         for (int i = 0; i < TEXT_COUNT; ++i) {
1012                 _active_texts[i].clear ();
1013         }
1014
1015         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1016                 if (time < i->content->position()) {
1017                         /* Before; seek to the start of the content */
1018                         i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
1019                         i->done = false;
1020                 } else if (i->content->position() <= time && time < i->content->end()) {
1021                         /* During; seek to position */
1022                         i->decoder->seek (dcp_to_content_time (i, time), accurate);
1023                         i->done = false;
1024                 } else {
1025                         /* After; this piece is done */
1026                         i->done = true;
1027                 }
1028         }
1029
1030         if (accurate) {
1031                 _last_video_time = time;
1032                 _last_video_eyes = EYES_LEFT;
1033                 _last_audio_time = time;
1034         } else {
1035                 _last_video_time = optional<DCPTime>();
1036                 _last_video_eyes = optional<Eyes>();
1037                 _last_audio_time = optional<DCPTime>();
1038         }
1039
1040         _black.set_position (time);
1041         _silent.set_position (time);
1042
1043         _last_video.clear ();
1044 }
1045
1046 void
1047 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1048 {
1049         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1050            player before the video that requires them.
1051         */
1052         _delay.push_back (make_pair (pv, time));
1053
1054         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1055                 _last_video_time = time + one_video_frame();
1056         }
1057         _last_video_eyes = increment_eyes (pv->eyes());
1058
1059         if (_delay.size() < 3) {
1060                 return;
1061         }
1062
1063         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1064         _delay.pop_front();
1065         do_emit_video (to_do.first, to_do.second);
1066 }
1067
1068 void
1069 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1070 {
1071         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1072                 for (int i = 0; i < TEXT_COUNT; ++i) {
1073                         _active_texts[i].clear_before (time);
1074                 }
1075         }
1076
1077         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1078         if (subtitles) {
1079                 pv->set_text (subtitles.get ());
1080         }
1081
1082         Video (pv, time);
1083 }
1084
1085 void
1086 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1087 {
1088         /* Log if the assert below is about to fail */
1089         if (_last_audio_time && time != *_last_audio_time) {
1090                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1091         }
1092
1093         /* This audio must follow on from the previous */
1094         DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1095         Audio (data, time);
1096         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1097 }
1098
1099 void
1100 Player::fill_audio (DCPTimePeriod period)
1101 {
1102         if (period.from == period.to) {
1103                 return;
1104         }
1105
1106         DCPOMATIC_ASSERT (period.from < period.to);
1107
1108         DCPTime t = period.from;
1109         while (t < period.to) {
1110                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1111                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1112                 if (samples) {
1113                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1114                         silence->make_silent ();
1115                         emit_audio (silence, t);
1116                 }
1117                 t += block;
1118         }
1119 }
1120
1121 DCPTime
1122 Player::one_video_frame () const
1123 {
1124         return DCPTime::from_frames (1, _film->video_frame_rate ());
1125 }
1126
1127 pair<shared_ptr<AudioBuffers>, DCPTime>
1128 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1129 {
1130         DCPTime const discard_time = discard_to - time;
1131         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1132         Frame remaining_frames = audio->frames() - discard_frames;
1133         if (remaining_frames <= 0) {
1134                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1135         }
1136         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1137         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1138         return make_pair(cut, time + discard_time);
1139 }
1140
/** Set a reduction in the resolution at which DCP content is decoded,
 *  re-creating the pieces if it has changed.  Emits a PENDING Change
 *  signal, then either CANCELLED (no change) or DONE.
 */
void
Player::set_dcp_decode_reduction (optional<int> reduction)
{
	/* Announce the change before taking the lock */
	Change (CHANGE_TYPE_PENDING, PlayerProperty::DCP_DECODE_REDUCTION, false);

	{
		boost::mutex::scoped_lock lm (_mutex);

		if (reduction == _dcp_decode_reduction) {
			/* No change: unlock before emitting so handlers can call back into Player */
			lm.unlock ();
			Change (CHANGE_TYPE_CANCELLED, PlayerProperty::DCP_DECODE_REDUCTION, false);
			return;
		}

		_dcp_decode_reduction = reduction;
		setup_pieces_unlocked ();
	}

	/* Lock released by the scope above; tell observers that the change has happened */
	Change (CHANGE_TYPE_DONE, PlayerProperty::DCP_DECODE_REDUCTION, false);
}
1161
1162 optional<DCPTime>
1163 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1164 {
1165         boost::mutex::scoped_lock lm (_mutex);
1166
1167         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1168                 if (i->content == content) {
1169                         return content_time_to_dcp (i, t);
1170                 }
1171         }
1172
1173         /* We couldn't find this content; perhaps things are being changed over */
1174         return optional<DCPTime>();
1175 }