Some subtitle renaming.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2018 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_text.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "text_content.h"
44 #include "text_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "dcp_decoder.h"
48 #include "image_decoder.h"
49 #include "compose.hpp"
50 #include "shuffler.h"
51 #include <dcp/reel.h>
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <dcp/reel_closed_caption_asset.h>
56 #include <boost/foreach.hpp>
57 #include <stdint.h>
58 #include <algorithm>
59 #include <iostream>
60
61 #include "i18n.h"
62
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
64
65 using std::list;
66 using std::cout;
67 using std::min;
68 using std::max;
69 using std::min;
70 using std::vector;
71 using std::pair;
72 using std::map;
73 using std::make_pair;
74 using std::copy;
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
80
81 int const PlayerProperty::VIDEO_CONTAINER_SIZE = 700;
82 int const PlayerProperty::PLAYLIST = 701;
83 int const PlayerProperty::FILM_CONTAINER = 702;
84 int const PlayerProperty::FILM_VIDEO_FRAME_RATE = 703;
85 int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
86
/** Construct a Player to play a Film's Playlist.
 *  @param film Film to play.
 *  @param playlist Playlist to play (usually the film's own playlist).
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
        : _film (film)
        , _playlist (playlist)
        , _have_valid_pieces (false)
        , _ignore_video (false)
        , _ignore_audio (false)
        , _ignore_text (false)
        , _always_burn_open_subtitles (false)
        , _fast (false)
        , _play_referenced (false)
        , _audio_merger (_film->audio_frame_rate())
        , _shuffler (0)
{
        /* Watch for changes to the film and playlist so that we can rebuild
           our Pieces and/or tell our own observers (see playlist_content_changed).
        */
        _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
        _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
        _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
        set_video_container_size (_film->frame_size ());

        /* Pick up the film's audio processor, if any */
        film_changed (Film::AUDIO_PROCESSOR);

        /* Start at the beginning; second argument presumably means `accurate' — TODO confirm against seek() */
        seek (DCPTime (), true);
}
109
Player::~Player ()
{
        /* _shuffler is an owning raw pointer, created in setup_pieces() */
        delete _shuffler;
}
114
/** (Re-)create our list of Pieces (content plus decoder plus frame-rate change)
 *  from the playlist, wire up the decoders' emission signals, and reset our
 *  playback state.  Called lazily whenever _have_valid_pieces is false.
 */
void
Player::setup_pieces ()
{
        _pieces.clear ();

        /* Replace the shuffler so no stale 3D state survives a rebuild */
        delete _shuffler;
        _shuffler = new Shuffler();
        _shuffler->Video.connect(bind(&Player::video, this, _1, _2));

        BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

                if (!i->paths_valid ()) {
                        /* Content files are missing; skip this content */
                        continue;
                }

                if (_ignore_video && _ignore_audio && i->text.empty()) {
                        /* We're only interested in text and this content has none */
                        continue;
                }

                shared_ptr<Decoder> decoder = decoder_factory (i, _film->log(), _fast);
                FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());

                if (!decoder) {
                        /* Not something that we can decode; e.g. Atmos content */
                        continue;
                }

                if (decoder->video && _ignore_video) {
                        decoder->video->set_ignore (true);
                }

                if (decoder->audio && _ignore_audio) {
                        decoder->audio->set_ignore (true);
                }

                if (_ignore_text) {
                        /* NOTE(review): this inner `i' shadows the Content `i' above */
                        BOOST_FOREACH (shared_ptr<TextDecoder> i, decoder->text) {
                                i->set_ignore (true);
                        }
                }

                shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
                if (dcp) {
                        dcp->set_decode_referenced (_play_referenced);
                        if (_play_referenced) {
                                dcp->set_forced_reduction (_dcp_decode_reduction);
                        }
                }

                shared_ptr<Piece> piece (new Piece (i, decoder, frc));
                _pieces.push_back (piece);

                /* Connect decoder outputs to our handlers; Pieces are passed as
                   weak_ptr so a queued emission cannot keep a dead Piece alive.
                */
                if (decoder->video) {
                        if (i->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT || i->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) {
                                /* We need a Shuffler to cope with 3D L/R video data arriving out of sequence */
                                decoder->video->Data.connect (bind (&Shuffler::video, _shuffler, weak_ptr<Piece>(piece), _1));
                        } else {
                                decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece>(piece), _1));
                        }
                }

                if (decoder->audio) {
                        decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
                }

                list<shared_ptr<TextDecoder> >::const_iterator j = decoder->text.begin();

                while (j != decoder->text.end()) {
                        (*j)->BitmapStart.connect (
                                bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
                                );
                        (*j)->PlainStart.connect (
                                bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1)
                                );
                        (*j)->Stop.connect (
                                bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), weak_ptr<const TextContent>((*j)->content()), _1, _2)
                                );

                        ++j;
                }
        }

        /* Record the starting state of every audio stream, used by pass() to
           work out how much merged audio is safe to emit.
        */
        _stream_states.clear ();
        BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
                if (i->content->audio) {
                        BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
                                _stream_states[j] = StreamState (i, i->content->position ());
                        }
                }
        }

        /* Periods of the film with no video / no audio, which we will fill
           with black frames and silence respectively.
        */
        _black = Empty (_film->content(), _film->length(), bind(&Content::video, _1));
        _silent = Empty (_film->content(), _film->length(), bind(&Content::audio, _1));

        /* Reset the emission state to the start of the film */
        _last_video_time = DCPTime ();
        _last_video_eyes = EYES_BOTH;
        _last_audio_time = DCPTime ();
        _have_valid_pieces = true;
}
215
216 void
217 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
218 {
219         shared_ptr<Content> c = w.lock ();
220         if (!c) {
221                 return;
222         }
223
224         if (
225                 property == ContentProperty::POSITION ||
226                 property == ContentProperty::LENGTH ||
227                 property == ContentProperty::TRIM_START ||
228                 property == ContentProperty::TRIM_END ||
229                 property == ContentProperty::PATH ||
230                 property == VideoContentProperty::FRAME_TYPE ||
231                 property == VideoContentProperty::COLOUR_CONVERSION ||
232                 property == AudioContentProperty::STREAMS ||
233                 property == DCPContentProperty::NEEDS_ASSETS ||
234                 property == DCPContentProperty::NEEDS_KDM ||
235                 property == TextContentProperty::COLOUR ||
236                 property == TextContentProperty::EFFECT ||
237                 property == TextContentProperty::EFFECT_COLOUR ||
238                 property == FFmpegContentProperty::SUBTITLE_STREAM ||
239                 property == FFmpegContentProperty::FILTERS
240                 ) {
241
242                 {
243                         boost::mutex::scoped_lock lm (_mutex);
244                         _have_valid_pieces = false;
245                 }
246
247                 Changed (property, frequent);
248
249         } else if (
250                 property == TextContentProperty::LINE_SPACING ||
251                 property == TextContentProperty::OUTLINE_WIDTH ||
252                 property == TextContentProperty::Y_SCALE ||
253                 property == TextContentProperty::FADE_IN ||
254                 property == TextContentProperty::FADE_OUT ||
255                 property == ContentProperty::VIDEO_FRAME_RATE ||
256                 property == TextContentProperty::USE ||
257                 property == TextContentProperty::X_OFFSET ||
258                 property == TextContentProperty::Y_OFFSET ||
259                 property == TextContentProperty::X_SCALE ||
260                 property == TextContentProperty::FONTS ||
261                 property == TextContentProperty::TYPE ||
262                 property == VideoContentProperty::CROP ||
263                 property == VideoContentProperty::SCALE ||
264                 property == VideoContentProperty::FADE_IN ||
265                 property == VideoContentProperty::FADE_OUT
266                 ) {
267
268                 Changed (property, frequent);
269         }
270 }
271
272 void
273 Player::set_video_container_size (dcp::Size s)
274 {
275         {
276                 boost::mutex::scoped_lock lm (_mutex);
277
278                 if (s == _video_container_size) {
279                         return;
280                 }
281
282                 _video_container_size = s;
283
284                 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
285                 _black_image->make_black ();
286         }
287
288         Changed (PlayerProperty::VIDEO_CONTAINER_SIZE, false);
289 }
290
/** Handle a change to the playlist's content list: our Pieces must be rebuilt */
void
Player::playlist_changed ()
{
        {
                boost::mutex::scoped_lock lm (_mutex);
                _have_valid_pieces = false;
        }

        /* Emitted outside the lock */
        Changed (PlayerProperty::PLAYLIST, false);
}
301
302 void
303 Player::film_changed (Film::Property p)
304 {
305         /* Here we should notice Film properties that affect our output, and
306            alert listeners that our output now would be different to how it was
307            last time we were run.
308         */
309
310         if (p == Film::CONTAINER) {
311                 Changed (PlayerProperty::FILM_CONTAINER, false);
312         } else if (p == Film::VIDEO_FRAME_RATE) {
313                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
314                    so we need new pieces here.
315                 */
316                 {
317                         boost::mutex::scoped_lock lm (_mutex);
318                         _have_valid_pieces = false;
319                 }
320                 Changed (PlayerProperty::FILM_VIDEO_FRAME_RATE, false);
321         } else if (p == Film::AUDIO_PROCESSOR) {
322                 if (_film->audio_processor ()) {
323                         boost::mutex::scoped_lock lm (_mutex);
324                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
325                 }
326         } else if (p == Film::AUDIO_CHANNELS) {
327                 boost::mutex::scoped_lock lm (_mutex);
328                 _audio_merger.clear ();
329         }
330 }
331
332 list<PositionImage>
333 Player::transform_bitmap_texts (list<BitmapText> subs) const
334 {
335         list<PositionImage> all;
336
337         for (list<BitmapText>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
338                 if (!i->image) {
339                         continue;
340                 }
341
342                 /* We will scale the subtitle up to fit _video_container_size */
343                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
344
345                 all.push_back (
346                         PositionImage (
347                                 i->image->scale (
348                                         scaled_size,
349                                         dcp::YUV_TO_RGB_REC601,
350                                         i->image->pixel_format (),
351                                         true,
352                                         _fast
353                                         ),
354                                 Position<int> (
355                                         lrint (_video_container_size.width * i->rectangle.x),
356                                         lrint (_video_container_size.height * i->rectangle.y)
357                                         )
358                                 )
359                         );
360         }
361
362         return all;
363 }
364
365 shared_ptr<PlayerVideo>
366 Player::black_player_video_frame (Eyes eyes) const
367 {
368         return shared_ptr<PlayerVideo> (
369                 new PlayerVideo (
370                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
371                         Crop (),
372                         optional<double> (),
373                         _video_container_size,
374                         _video_container_size,
375                         eyes,
376                         PART_WHOLE,
377                         PresetColourConversion::all().front().conversion,
378                         boost::weak_ptr<Content>(),
379                         boost::optional<Frame>()
380                 )
381         );
382 }
383
384 Frame
385 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
386 {
387         DCPTime s = t - piece->content->position ();
388         s = min (piece->content->length_after_trim(), s);
389         s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
390
391         /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
392            then convert that ContentTime to frames at the content's rate.  However this fails for
393            situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
394            enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
395
396            Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
397         */
398         return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
399 }
400
401 DCPTime
402 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
403 {
404         /* See comment in dcp_to_content_video */
405         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime(piece->content->trim_start(), piece->frc);
406         return d + piece->content->position();
407 }
408
409 Frame
410 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
411 {
412         DCPTime s = t - piece->content->position ();
413         s = min (piece->content->length_after_trim(), s);
414         /* See notes in dcp_to_content_video */
415         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
416 }
417
418 DCPTime
419 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
420 {
421         /* See comment in dcp_to_content_video */
422         return DCPTime::from_frames (f, _film->audio_frame_rate())
423                 - DCPTime (piece->content->trim_start(), piece->frc)
424                 + piece->content->position();
425 }
426
427 ContentTime
428 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
429 {
430         DCPTime s = t - piece->content->position ();
431         s = min (piece->content->length_after_trim(), s);
432         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
433 }
434
435 DCPTime
436 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
437 {
438         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
439 }
440
441 list<shared_ptr<Font> >
442 Player::get_subtitle_fonts ()
443 {
444         /* Does not require a lock on _mutex as it's only called from DCPEncoder */
445
446         if (!_have_valid_pieces) {
447                 setup_pieces ();
448         }
449
450         list<shared_ptr<Font> > fonts;
451         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
452                 BOOST_FOREACH (shared_ptr<TextContent> j, i->content->text) {
453                         /* XXX: things may go wrong if there are duplicate font IDs
454                            with different font files.
455                         */
456                         list<shared_ptr<Font> > f = j->fonts ();
457                         copy (f.begin(), f.end(), back_inserter (fonts));
458                 }
459         }
460
461         return fonts;
462 }
463
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _ignore_video = true;
        /* The flag is applied to decoders in setup_pieces, so pieces must be rebuilt */
        _have_valid_pieces = false;
}
472
/** Set this player never to produce any audio data */
void
Player::set_ignore_audio ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _ignore_audio = true;
        /* The flag is applied to decoders in setup_pieces, so pieces must be rebuilt */
        _have_valid_pieces = false;
}
480
481 void
482 Player::set_ignore_text ()
483 {
484         boost::mutex::scoped_lock lm (_mutex);
485         _ignore_text = true;
486 }
487
/** Set the player to always burn open texts into the image regardless of the content settings */
void
Player::set_always_burn_open_subtitles ()
{
        boost::mutex::scoped_lock lm (_mutex);
        /* Checked at emission time (see open_subtitles_for_frame), so no piece rebuild is needed */
        _always_burn_open_subtitles = true;
}
495
/** Sets up the player to be faster, possibly at the expense of quality */
void
Player::set_fast ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _fast = true;
        /* _fast is passed to decoder_factory in setup_pieces, so pieces must be rebuilt */
        _have_valid_pieces = false;
}
504
/** Ask DCP decoders to decode content which would otherwise be referenced
 *  from an existing DCP (see setup_pieces / DCPDecoder::set_decode_referenced).
 */
void
Player::set_play_referenced ()
{
        boost::mutex::scoped_lock lm (_mutex);
        _play_referenced = true;
        /* Applied to DCPDecoders in setup_pieces, so pieces must be rebuilt */
        _have_valid_pieces = false;
}
512
513 list<ReferencedReelAsset>
514 Player::get_reel_assets ()
515 {
516         /* Does not require a lock on _mutex as it's only called from DCPEncoder */
517
518         list<ReferencedReelAsset> a;
519
520         BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
521                 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
522                 if (!j) {
523                         continue;
524                 }
525
526                 scoped_ptr<DCPDecoder> decoder;
527                 try {
528                         decoder.reset (new DCPDecoder (j, _film->log(), false));
529                 } catch (...) {
530                         return a;
531                 }
532
533                 int64_t offset = 0;
534                 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
535
536                         DCPOMATIC_ASSERT (j->video_frame_rate ());
537                         double const cfr = j->video_frame_rate().get();
538                         Frame const trim_start = j->trim_start().frames_round (cfr);
539                         Frame const trim_end = j->trim_end().frames_round (cfr);
540                         int const ffr = _film->video_frame_rate ();
541
542                         DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
543                         if (j->reference_video ()) {
544                                 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
545                                 DCPOMATIC_ASSERT (ra);
546                                 ra->set_entry_point (ra->entry_point() + trim_start);
547                                 ra->set_duration (ra->duration() - trim_start - trim_end);
548                                 a.push_back (
549                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
550                                         );
551                         }
552
553                         if (j->reference_audio ()) {
554                                 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
555                                 DCPOMATIC_ASSERT (ra);
556                                 ra->set_entry_point (ra->entry_point() + trim_start);
557                                 ra->set_duration (ra->duration() - trim_start - trim_end);
558                                 a.push_back (
559                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
560                                         );
561                         }
562
563                         if (j->reference_text (TEXT_OPEN_SUBTITLE)) {
564                                 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
565                                 DCPOMATIC_ASSERT (ra);
566                                 ra->set_entry_point (ra->entry_point() + trim_start);
567                                 ra->set_duration (ra->duration() - trim_start - trim_end);
568                                 a.push_back (
569                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
570                                         );
571                         }
572
573                         if (j->reference_text (TEXT_CLOSED_CAPTION)) {
574                                 shared_ptr<dcp::ReelAsset> ra = k->closed_caption ();
575                                 DCPOMATIC_ASSERT (ra);
576                                 ra->set_entry_point (ra->entry_point() + trim_start);
577                                 ra->set_duration (ra->duration() - trim_start - trim_end);
578                                 a.push_back (
579                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
580                                         );
581                         }
582
583                         /* Assume that main picture duration is the length of the reel */
584                         offset += k->main_picture()->duration ();
585                 }
586         }
587
588         return a;
589 }
590
/** Ask the most-behind decoder (or gap-filler) to emit some data, then pass on
 *  any merged audio which is known to be complete.
 *  @return true when there appears to be nothing left to emit.
 */
bool
Player::pass ()
{
        boost::mutex::scoped_lock lm (_mutex);

        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        if (_playlist->length() == DCPTime()) {
                /* Special case of an empty Film; just give one black frame */
                emit_video (black_player_video_frame(EYES_BOTH), DCPTime());
                return true;
        }

        /* Find the decoder or empty which is farthest behind where we are and make it emit some data */

        shared_ptr<Piece> earliest_content;
        optional<DCPTime> earliest_time;

        BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
                if (i->done) {
                        continue;
                }

                /* Decoder position in DCP time, clamped to the start of the trimmed content */
                DCPTime const t = content_time_to_dcp (i, max(i->decoder->position(), i->content->trim_start()));
                if (t > i->content->end()) {
                        i->done = true;
                } else {

                        /* Given two choices at the same time, pick the one with texts so we see it before
                           the video.
                        */
                        if (!earliest_time || t < *earliest_time || (t == *earliest_time && !i->decoder->text.empty())) {
                                earliest_time = t;
                                earliest_content = i;
                        }
                }
        }

        bool done = false;

        /* What we will ask to emit next: real content, a black frame, silence, or nothing */
        enum {
                NONE,
                CONTENT,
                BLACK,
                SILENT
        } which = NONE;

        if (earliest_content) {
                which = CONTENT;
        }

        /* Prefer black/silent filler if it is behind the earliest content */
        if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
                earliest_time = _black.position ();
                which = BLACK;
        }

        if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
                earliest_time = _silent.position ();
                which = SILENT;
        }

        switch (which) {
        case CONTENT:
                earliest_content->done = earliest_content->decoder->pass ();
                break;
        case BLACK:
                emit_video (black_player_video_frame(EYES_BOTH), _black.position());
                _black.set_position (_black.position() + one_video_frame());
                break;
        case SILENT:
        {
                DCPTimePeriod period (_silent.period_at_position());
                if (_last_audio_time) {
                        /* Sometimes the thing that happened last finishes fractionally before
                           this silence.  Bodge the start time of the silence to fix it.  I'm
                           not sure if this is the right solution --- maybe the last thing should
                           be padded `forward' rather than this thing padding `back'.
                        */
                        period.from = min(period.from, *_last_audio_time);
                }
                /* Emit at most one video frame's worth of silence per pass */
                if (period.duration() > one_video_frame()) {
                        period.to = period.from + one_video_frame();
                }
                fill_audio (period);
                _silent.set_position (period.to);
                break;
        }
        case NONE:
                done = true;
                break;
        }

        /* Emit any audio that is ready */

        /* Work out the time before which the audio is definitely all here.  This is the earliest last_push_end of one
           of our streams, or the position of the _silent.
        */
        DCPTime pull_to = _film->length ();
        for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
                if (!i->second.piece->done && i->second.last_push_end < pull_to) {
                        pull_to = i->second.last_push_end;
                }
        }
        if (!_silent.done() && _silent.position() < pull_to) {
                pull_to = _silent.position();
        }

        list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
        for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
                if (_last_audio_time && i->second < *_last_audio_time) {
                        /* This new data comes before the last we emitted (or the last seek); discard it */
                        pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
                        if (!cut.first) {
                                continue;
                        }
                        *i = cut;
                } else if (_last_audio_time && i->second > *_last_audio_time) {
                        /* There's a gap between this data and the last we emitted; fill with silence */
                        fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
                }

                emit_audio (i->first, i->second);
        }

        if (done) {
                /* Flush anything held back for 3D re-ordering or the emission delay */
                _shuffler->flush ();
                for (list<pair<shared_ptr<PlayerVideo>, DCPTime> >::const_iterator i = _delay.begin(); i != _delay.end(); ++i) {
                        do_emit_video(i->first, i->second);
                }
        }

        return done;
}
726
727 list<PlayerText>
728 Player::closed_captions_for_frame (DCPTime time) const
729 {
730         boost::mutex::scoped_lock _lm (_mutex);
731         return _active_texts[TEXT_CLOSED_CAPTION].get (
732                 DCPTimePeriod(time, time + DCPTime::from_frames(1, _film->video_frame_rate()))
733                 );
734 }
735
736 /** @return Open subtitles for the frame at the given time, converted to images */
737 optional<PositionImage>
738 Player::open_subtitles_for_frame (DCPTime time) const
739 {
740         list<PositionImage> captions;
741         int const vfr = _film->video_frame_rate();
742
743         BOOST_FOREACH (
744                 PlayerText j,
745                 _active_texts[TEXT_OPEN_SUBTITLE].get_burnt(DCPTimePeriod(time, time + DCPTime::from_frames(1, vfr)), _always_burn_open_subtitles)
746                 ) {
747
748                 /* Bitmap subtitles */
749                 list<PositionImage> c = transform_bitmap_texts (j.bitmap);
750                 copy (c.begin(), c.end(), back_inserter (captions));
751
752                 /* String subtitles (rendered to an image) */
753                 if (!j.string.empty ()) {
754                         list<PositionImage> s = render_text (j.string, j.fonts, _video_container_size, time, vfr);
755                         copy (s.begin(), s.end(), back_inserter (captions));
756                 }
757         }
758
759         if (captions.empty ()) {
760                 return optional<PositionImage> ();
761         }
762
763         return merge (captions);
764 }
765
/** Handle a video frame that has arrived from a decoder.
 *  @param wp Piece that the frame came from.
 *  @param video The frame, with its frame number within the content.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
	if (frc.skip && (video.frame % 2) == 1) {
		/* The rate change calls for skipping frames, so drop the odd-numbered ones */
		return;
	}

	/* Time of the first frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);

	/* Discard if it's before the content's period or the last accurate seek.  We can't discard
	   if it's after the content's period here as in that case we still need to fill any gap between
	   `now' and the end of the content's period.
	*/
	if (time < piece->content->position() || (_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted.
	   This is where we need to fill to.
	*/
	DCPTime fill_to = min (time, piece->content->end());

	if (_last_video_time) {
		DCPTime fill_from = max (*_last_video_time, piece->content->position());
		/* The last frame we emitted for this piece, if any; gaps are filled by
		   repeating it rather than by emitting black.
		*/
		LastVideoMap::const_iterator last = _last_video.find (wp);
		if (_film->three_d()) {
			/* In 3D the gap must be filled with alternating left/right eyes,
			   starting with whichever eye is expected next and continuing until
			   we reach both fill_to and the eye of the incoming frame.
			*/
			DCPTime j = fill_from;
			Eyes eyes = _last_video_eyes.get_value_or(EYES_LEFT);
			if (eyes == EYES_BOTH) {
				eyes = EYES_LEFT;
			}
			while (j < fill_to || eyes != video.eyes) {
				if (last != _last_video.end()) {
					shared_ptr<PlayerVideo> copy = last->second->shallow_copy();
					copy->set_eyes (eyes);
					emit_video (copy, j);
				} else {
					emit_video (black_player_video_frame(eyes), j);
				}
				/* Left and right eyes share a timestamp, so only advance time after the right eye */
				if (eyes == EYES_RIGHT) {
					j += one_video_frame();
				}
				eyes = increment_eyes (eyes);
			}
		} else {
			for (DCPTime j = fill_from; j < fill_to; j += one_video_frame()) {
				if (last != _last_video.end()) {
					emit_video (last->second, j);
				} else {
					emit_video (black_player_video_frame(EYES_BOTH), j);
				}
			}
		}
	}

	/* Remember this frame so it can be repeated to fill any future gap */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion(),
			piece->content,
			video.frame
			)
		);

	/* Emit the frame, repeating it if the frame rate change requires it, but
	   never past the end of the content's period.
	*/
	DCPTime t = time;
	for (int i = 0; i < frc.repeat; ++i) {
		if (t < piece->content->end()) {
			emit_video (_last_video[wp], t);
		}
		t += one_video_frame ();
	}
}
853
/** Handle some audio that has arrived from a decoder.
 *  @param wp Piece that the audio came from.
 *  @param stream Stream within the piece that the audio belongs to.
 *  @param content_audio The audio data and its frame number within the content.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		/* The block starts before the content; discard the part that overhangs the start */
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end()) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end()) {
		/* The block straddles the end of the content; keep only the part before the end */
		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Apply the content's gain setting, if any */

	if (content->gain() != 0) {
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap the stream's channels onto the DCP's channels */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Run through the audio processor, if one is set */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push to the merger, and record how far this stream has been pushed */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
920
921 void
922 Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentBitmapText subtitle)
923 {
924         shared_ptr<Piece> piece = wp.lock ();
925         shared_ptr<const TextContent> text = wc.lock ();
926         if (!piece || !text) {
927                 return;
928         }
929
930         /* Apply content's subtitle offsets */
931         subtitle.sub.rectangle.x += text->x_offset ();
932         subtitle.sub.rectangle.y += text->y_offset ();
933
934         /* Apply a corrective translation to keep the subtitle centred after the scale that is coming up */
935         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * ((text->x_scale() - 1) / 2);
936         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * ((text->y_scale() - 1) / 2);
937
938         /* Apply content's subtitle scale */
939         subtitle.sub.rectangle.width *= text->x_scale ();
940         subtitle.sub.rectangle.height *= text->y_scale ();
941
942         PlayerText ps;
943         ps.bitmap.push_back (subtitle.sub);
944         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
945
946         _active_texts[subtitle.type()].add_from (wc, ps, from);
947 }
948
/** Handle a string subtitle that has started, applying the content's offset,
 *  scale and outline settings before storing it as an active text.
 *  @param wp Piece that the subtitle came from.
 *  @param wc Text content within the piece.
 *  @param subtitle The subtitle itself.
 */
void
Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentStringText subtitle)
{
	shared_ptr<Piece> piece = wp.lock ();
	shared_ptr<const TextContent> text = wc.lock ();
	if (!piece || !text) {
		return;
	}

	PlayerText ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	if (from > piece->content->end()) {
		return;
	}

	/* s is deliberately taken by value here, as it is modified below */
	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		s.set_h_position (s.h_position() + text->x_offset ());
		s.set_v_position (s.v_position() + text->y_offset ());
		float const xs = text->x_scale();
		float const ys = text->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 the size is halved (the factor is max(xs, ys)).
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.string.push_back (StringText (s, text->outline_width()));
		ps.add_fonts (text->fonts ());
	}

	_active_texts[subtitle.type()].add_from (wc, ps, from);
}
992
993 void
994 Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const TextContent> wc, ContentTime to, TextType type)
995 {
996         if (!_active_texts[type].have (wc)) {
997                 return;
998         }
999
1000         shared_ptr<Piece> piece = wp.lock ();
1001         shared_ptr<const TextContent> text = wc.lock ();
1002         if (!piece || !text) {
1003                 return;
1004         }
1005
1006         DCPTime const dcp_to = content_time_to_dcp (piece, to);
1007
1008         if (dcp_to > piece->content->end()) {
1009                 return;
1010         }
1011
1012         pair<PlayerText, DCPTime> from = _active_texts[type].add_to (wc, dcp_to);
1013
1014         bool const always = (type == TEXT_OPEN_SUBTITLE && _always_burn_open_subtitles);
1015         if (text->use() && !always && !text->burn()) {
1016                 Text (from.first, type, DCPTimePeriod (from.second, dcp_to));
1017         }
1018 }
1019
/** Seek the player to a time, resetting all emission state and seeking every decoder.
 *  @param time DCP time to seek to.
 *  @param accurate passed on to each decoder's seek(); when true we also assume
 *  that emission will restart exactly at `time'.
 */
void
Player::seek (DCPTime time, bool accurate)
{
	boost::mutex::scoped_lock lm (_mutex);

	if (!_have_valid_pieces) {
		setup_pieces ();
	}

	if (_shuffler) {
		_shuffler->clear ();
	}

	/* Throw away any video frames that were being delayed before emission */
	_delay.clear ();

	if (_audio_processor) {
		_audio_processor->flush ();
	}

	_audio_merger.clear ();
	for (int i = 0; i < TEXT_COUNT; ++i) {
		_active_texts[i].clear ();
	}

	/* Seek each piece's decoder, or mark the piece done if the seek point is past its end */
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (time < i->content->position()) {
			/* Before; seek to the start of the content */
			i->decoder->seek (dcp_to_content_time (i, i->content->position()), accurate);
			i->done = false;
		} else if (i->content->position() <= time && time < i->content->end()) {
			/* During; seek to position */
			i->decoder->seek (dcp_to_content_time (i, time), accurate);
			i->done = false;
		} else {
			/* After; this piece is done */
			i->done = true;
		}
	}

	if (accurate) {
		/* After an accurate seek the next video/audio should be exactly at `time' */
		_last_video_time = time;
		_last_video_eyes = EYES_LEFT;
		_last_audio_time = time;
	} else {
		/* We don't know exactly where the decoders will pick up from */
		_last_video_time = optional<DCPTime>();
		_last_video_eyes = optional<Eyes>();
		_last_audio_time = optional<DCPTime>();
	}

	_black.set_position (time);
	_silent.set_position (time);

	/* Forget remembered frames so gaps after the seek are filled with black, not stale video */
	_last_video.clear ();
}
1074
1075 void
1076 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1077 {
1078         /* We need a delay to give a little wiggle room to ensure that relevent subtitles arrive at the
1079            player before the video that requires them.
1080         */
1081         _delay.push_back (make_pair (pv, time));
1082
1083         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1084                 _last_video_time = time + one_video_frame();
1085         }
1086         _last_video_eyes = increment_eyes (pv->eyes());
1087
1088         if (_delay.size() < 3) {
1089                 return;
1090         }
1091
1092         pair<shared_ptr<PlayerVideo>, DCPTime> to_do = _delay.front();
1093         _delay.pop_front();
1094         do_emit_video (to_do.first, to_do.second);
1095 }
1096
1097 void
1098 Player::do_emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1099 {
1100         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1101                 for (int i = 0; i < TEXT_COUNT; ++i) {
1102                         _active_texts[i].clear_before (time);
1103                 }
1104         }
1105
1106         optional<PositionImage> subtitles = open_subtitles_for_frame (time);
1107         if (subtitles) {
1108                 pv->set_text (subtitles.get ());
1109         }
1110
1111         Video (pv, time);
1112 }
1113
1114 void
1115 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1116 {
1117         /* Log if the assert below is about to fail */
1118         if (_last_audio_time && time != *_last_audio_time) {
1119                 _film->log()->log(String::compose("Out-of-sequence emit %1 vs %2", to_string(time), to_string(*_last_audio_time)), LogEntry::TYPE_WARNING);
1120         }
1121
1122         /* This audio must follow on from the previous */
1123         DCPOMATIC_ASSERT (!_last_audio_time || time == *_last_audio_time);
1124         Audio (data, time);
1125         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
1126 }
1127
1128 void
1129 Player::fill_audio (DCPTimePeriod period)
1130 {
1131         if (period.from == period.to) {
1132                 return;
1133         }
1134
1135         DCPOMATIC_ASSERT (period.from < period.to);
1136
1137         DCPTime t = period.from;
1138         while (t < period.to) {
1139                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
1140                 Frame const samples = block.frames_round(_film->audio_frame_rate());
1141                 if (samples) {
1142                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1143                         silence->make_silent ();
1144                         emit_audio (silence, t);
1145                 }
1146                 t += block;
1147         }
1148 }
1149
1150 DCPTime
1151 Player::one_video_frame () const
1152 {
1153         return DCPTime::from_frames (1, _film->video_frame_rate ());
1154 }
1155
1156 pair<shared_ptr<AudioBuffers>, DCPTime>
1157 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1158 {
1159         DCPTime const discard_time = discard_to - time;
1160         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1161         Frame remaining_frames = audio->frames() - discard_frames;
1162         if (remaining_frames <= 0) {
1163                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1164         }
1165         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1166         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1167         return make_pair(cut, time + discard_time);
1168 }
1169
1170 void
1171 Player::set_dcp_decode_reduction (optional<int> reduction)
1172 {
1173         {
1174                 boost::mutex::scoped_lock lm (_mutex);
1175
1176                 if (reduction == _dcp_decode_reduction) {
1177                         return;
1178                 }
1179
1180                 _dcp_decode_reduction = reduction;
1181                 _have_valid_pieces = false;
1182         }
1183
1184         Changed (PlayerProperty::DCP_DECODE_REDUCTION, false);
1185 }
1186
1187 DCPTime
1188 Player::content_time_to_dcp (shared_ptr<Content> content, ContentTime t)
1189 {
1190         boost::mutex::scoped_lock lm (_mutex);
1191
1192         if (_have_valid_pieces) {
1193                 setup_pieces ();
1194         }
1195
1196         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
1197                 if (i->content == content) {
1198                         return content_time_to_dcp (i, t);
1199                 }
1200         }
1201
1202         DCPOMATIC_ASSERT (false);
1203         return DCPTime ();
1204 }