Fix audio fill in the presence of audio delay.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_subtitles.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "resampler.h"
51 #include "compose.hpp"
52 #include <dcp/reel.h>
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <boost/foreach.hpp>
57 #include <stdint.h>
58 #include <algorithm>
59 #include <iostream>
60
61 #include "i18n.h"
62
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
64
65 using std::list;
66 using std::cout;
67 using std::min;
68 using std::max;
69 using std::min;
70 using std::vector;
71 using std::pair;
72 using std::map;
73 using std::make_pair;
74 using std::copy;
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
80
/** Construct a Player for a film and playlist.
 *  Connects to change signals on both, sets the video container size from the
 *  film's frame size, picks up any audio processor and seeks to the start.
 *  @param film Film to play.
 *  @param playlist Playlist to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _have_valid_pieces (false)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _always_burn_subtitles (false)
	, _fast (false)
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
{
	_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
	_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
	_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor (if any) */
	film_changed (Film::AUDIO_PROCESSOR);

	/* Accurate seek to the start of the film */
	seek (DCPTime (), true);
}
101
/** (Re)build our list of Pieces from the playlist's content, wiring each
 *  decoder's outputs to our handlers and noting the periods for which
 *  referenced DCPs will supply video/audio (so we do not fill them).
 */
void
Player::setup_pieces ()
{
	_pieces.clear ();

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

		if (!i->paths_valid ()) {
			/* Content whose files are missing is skipped entirely */
			continue;
		}

		shared_ptr<Decoder> decoder = decoder_factory (i, _film->log());
		FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());

		if (!decoder) {
			/* Not something that we can decode; e.g. Atmos content */
			continue;
		}

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore ();
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore ();
		}

		/* When we are asked to play referenced content the DCP decoder must
		   actually decode it rather than skipping it.
		*/
		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp && _play_referenced) {
			dcp->set_decode_referenced ();
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		/* Connect decoder outputs to our handlers.  The Piece is passed as a
		   weak_ptr so the connection cannot keep it alive.
		*/
		if (decoder->video) {
			decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		if (decoder->subtitle) {
			decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
			decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
			decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
		}
	}

	/* Start a StreamState for every audio stream, positioned at its content's start */
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Record periods covered by referenced DCP video/audio so that the fill
	   logic leaves them alone.
	*/
	if (!_play_referenced) {
		BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
			shared_ptr<DCPContent> dc = dynamic_pointer_cast<DCPContent> (i->content);
			if (dc) {
				if (dc->reference_video()) {
					_no_video.push_back (DCPTimePeriod (dc->position(), dc->end()));
				}
				if (dc->reference_audio()) {
					_no_audio.push_back (DCPTimePeriod (dc->position(), dc->end()));
				}
			}
		}
	}

	/* Reset emission times to the start */
	_last_video_time = DCPTime ();
	_last_audio_time = DCPTime ();
	_have_valid_pieces = true;
}
178
179 void
180 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
181 {
182         shared_ptr<Content> c = w.lock ();
183         if (!c) {
184                 return;
185         }
186
187         if (
188                 property == ContentProperty::POSITION ||
189                 property == ContentProperty::LENGTH ||
190                 property == ContentProperty::TRIM_START ||
191                 property == ContentProperty::TRIM_END ||
192                 property == ContentProperty::PATH ||
193                 property == VideoContentProperty::FRAME_TYPE ||
194                 property == DCPContentProperty::NEEDS_ASSETS ||
195                 property == DCPContentProperty::NEEDS_KDM ||
196                 property == SubtitleContentProperty::COLOUR ||
197                 property == SubtitleContentProperty::OUTLINE ||
198                 property == SubtitleContentProperty::SHADOW ||
199                 property == SubtitleContentProperty::EFFECT_COLOUR ||
200                 property == FFmpegContentProperty::SUBTITLE_STREAM ||
201                 property == VideoContentProperty::COLOUR_CONVERSION
202                 ) {
203
204                 _have_valid_pieces = false;
205                 Changed (frequent);
206
207         } else if (
208                 property == SubtitleContentProperty::LINE_SPACING ||
209                 property == SubtitleContentProperty::OUTLINE_WIDTH ||
210                 property == SubtitleContentProperty::Y_SCALE ||
211                 property == SubtitleContentProperty::FADE_IN ||
212                 property == SubtitleContentProperty::FADE_OUT ||
213                 property == ContentProperty::VIDEO_FRAME_RATE ||
214                 property == SubtitleContentProperty::USE ||
215                 property == SubtitleContentProperty::X_OFFSET ||
216                 property == SubtitleContentProperty::Y_OFFSET ||
217                 property == SubtitleContentProperty::X_SCALE ||
218                 property == SubtitleContentProperty::FONTS ||
219                 property == VideoContentProperty::CROP ||
220                 property == VideoContentProperty::SCALE ||
221                 property == VideoContentProperty::FADE_IN ||
222                 property == VideoContentProperty::FADE_OUT
223                 ) {
224
225                 Changed (frequent);
226         }
227 }
228
229 void
230 Player::set_video_container_size (dcp::Size s)
231 {
232         if (s == _video_container_size) {
233                 return;
234         }
235
236         _video_container_size = s;
237
238         _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
239         _black_image->make_black ();
240
241         Changed (false);
242 }
243
/** Handle a wholesale change to the playlist: our Pieces are now invalid
 *  and our output will be different.
 */
void
Player::playlist_changed ()
{
	_have_valid_pieces = false;
	Changed (false);
}
250
251 void
252 Player::film_changed (Film::Property p)
253 {
254         /* Here we should notice Film properties that affect our output, and
255            alert listeners that our output now would be different to how it was
256            last time we were run.
257         */
258
259         if (p == Film::CONTAINER) {
260                 Changed (false);
261         } else if (p == Film::VIDEO_FRAME_RATE) {
262                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
263                    so we need new pieces here.
264                 */
265                 _have_valid_pieces = false;
266                 Changed (false);
267         } else if (p == Film::AUDIO_PROCESSOR) {
268                 if (_film->audio_processor ()) {
269                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
270                 }
271         }
272 }
273
274 list<PositionImage>
275 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
276 {
277         list<PositionImage> all;
278
279         for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
280                 if (!i->image) {
281                         continue;
282                 }
283
284                 /* We will scale the subtitle up to fit _video_container_size */
285                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
286
287                 /* Then we need a corrective translation, consisting of two parts:
288                  *
289                  * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
290                  *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
291                  *
292                  * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
293                  *     (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
294                  *     (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
295                  *
296                  * Combining these two translations gives these expressions.
297                  */
298
299                 all.push_back (
300                         PositionImage (
301                                 i->image->scale (
302                                         scaled_size,
303                                         dcp::YUV_TO_RGB_REC601,
304                                         i->image->pixel_format (),
305                                         true,
306                                         _fast
307                                         ),
308                                 Position<int> (
309                                         lrint (_video_container_size.width * i->rectangle.x),
310                                         lrint (_video_container_size.height * i->rectangle.y)
311                                         )
312                                 )
313                         );
314         }
315
316         return all;
317 }
318
319 shared_ptr<PlayerVideo>
320 Player::black_player_video_frame () const
321 {
322         return shared_ptr<PlayerVideo> (
323                 new PlayerVideo (
324                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
325                         Crop (),
326                         optional<double> (),
327                         _video_container_size,
328                         _video_container_size,
329                         EYES_BOTH,
330                         PART_WHOLE,
331                         PresetColourConversion::all().front().conversion
332                 )
333         );
334 }
335
336 Frame
337 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
338 {
339         DCPTime s = t - piece->content->position ();
340         s = min (piece->content->length_after_trim(), s);
341         s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
342
343         /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
344            then convert that ContentTime to frames at the content's rate.  However this fails for
345            situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
346            enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
347
348            Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
349         */
350         return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
351 }
352
353 DCPTime
354 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
355 {
356         /* See comment in dcp_to_content_video */
357         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
358         return max (DCPTime (), d + piece->content->position ());
359 }
360
361 Frame
362 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
363 {
364         DCPTime s = t - piece->content->position ();
365         s = min (piece->content->length_after_trim(), s);
366         /* See notes in dcp_to_content_video */
367         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
368 }
369
370 DCPTime
371 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
372 {
373         /* See comment in dcp_to_content_video */
374         DCPTime const d = DCPTime::from_frames (f, _film->audio_frame_rate()) - DCPTime (piece->content->trim_start(), piece->frc);
375         return max (DCPTime (), d + piece->content->position ());
376 }
377
378 ContentTime
379 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
380 {
381         DCPTime s = t - piece->content->position ();
382         s = min (piece->content->length_after_trim(), s);
383         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
384 }
385
386 DCPTime
387 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
388 {
389         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
390 }
391
392 list<shared_ptr<Font> >
393 Player::get_subtitle_fonts ()
394 {
395         if (!_have_valid_pieces) {
396                 setup_pieces ();
397         }
398
399         list<shared_ptr<Font> > fonts;
400         BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
401                 if (p->content->subtitle) {
402                         /* XXX: things may go wrong if there are duplicate font IDs
403                            with different font files.
404                         */
405                         list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
406                         copy (f.begin(), f.end(), back_inserter (fonts));
407                 }
408         }
409
410         return fonts;
411 }
412
/** Set this player never to produce any video data */
void
Player::set_ignore_video ()
{
	_ignore_video = true;
}
419
/** Set whether or not this player should always burn text subtitles into the image,
 *  regardless of the content settings.
 *  @param burn true to always burn subtitles, false to obey content settings.
 */
void
Player::set_always_burn_subtitles (bool burn)
{
	_always_burn_subtitles = burn;
}
429
/** Set this player to play as fast as possible (e.g. for preview rather than
 *  real-time playback); the pieces must be rebuilt for this to take effect.
 */
void
Player::set_fast ()
{
	_fast = true;
	_have_valid_pieces = false;
}
436
/** Set this player to play content that would otherwise be referenced
 *  directly from an existing DCP; the pieces must be rebuilt for this to
 *  take effect.
 */
void
Player::set_play_referenced ()
{
	_play_referenced = true;
	_have_valid_pieces = false;
}
443
/** @return Reel assets (picture/sound/subtitle) from any DCP content which is
 *  marked to be referenced rather than re-encoded, with their entry points and
 *  durations adjusted for the content's trim.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			/* Only DCP content can be referenced */
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (j, _film->log()));
		} catch (...) {
			/* NOTE(review): any failure to open the DCP returns whatever has been
			   gathered so far rather than reporting an error — confirm this
			   best-effort behaviour is intended.
			*/
			return a;
		}

		/* Offset in frames of each reel from the start of this content */
		int64_t offset = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			DCPOMATIC_ASSERT (j->video_frame_rate ());
			/* Content frame rate, used to express the trims in frames */
			double const cfr = j->video_frame_rate().get();
			Frame const trim_start = j->trim_start().frames_round (cfr);
			Frame const trim_end = j->trim_end().frames_round (cfr);
			int const ffr = _film->video_frame_rate ();

			/* Where this reel starts in the film */
			DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
			if (j->reference_video ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
				DCPOMATIC_ASSERT (ra);
				/* Apply the content's trim to the asset's entry point/duration */
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_audio ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_subtitle ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			/* Assume that main picture duration is the length of the reel */
			offset += k->main_picture()->duration ();
		}
	}

	return a;
}
509
/** Run one pass of the player: ask the earliest decoder for more data, filling
 *  any gaps before it with black video and/or silent audio.
 *  @return true if there is nothing left to do (no content remains and nothing
 *  was filled on this pass).
 */
bool
Player::pass ()
{
	if (!_have_valid_pieces) {
		setup_pieces ();
	}

	/* Find the piece whose decoder is positioned earliest in DCP time */
	shared_ptr<Piece> earliest;
	DCPTime earliest_content;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (!i->done) {
			DCPTime const t = content_time_to_dcp (i, i->decoder->position());
			if (!earliest || t < earliest_content) {
				earliest_content = t;
				earliest = i;
			}
		}
	}

	/* Fill towards the next thing that might happen (or the end of the playlist).  This is to fill gaps between content,
	   NOT to fill gaps within content (the latter is done in ::video())
	*/
	DCPTime fill_towards = earliest ? earliest_content : _playlist->length().ceil(_film->video_frame_rate());

	/* Work out where to fill video from */
	optional<DCPTime> video_fill_from;
	if (_last_video_time) {
		/* Fill from the last video or seek time */
		video_fill_from = _last_video_time;
	}

	bool filled = false;
	/* Fill some black if we would emit before the earliest piece of content.  This is so we act like a phantom
	   Piece which emits black in spaces (we only emit if we are the earliest thing)
	*/
	if (video_fill_from && (!earliest || *video_fill_from < earliest_content) && ((fill_towards - *video_fill_from)) >= one_video_frame()) {
		/* Remove any periods supplied by referenced DCP video */
		list<DCPTimePeriod> p = subtract(DCPTimePeriod(*video_fill_from, *video_fill_from + one_video_frame()), _no_video);
		if (!p.empty ()) {
			emit_video (black_player_video_frame(), p.front().from);
			filled = true;
		}
	} else if (_playlist->length() == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(), DCPTime());
		filled = true;
	}

	/* Work out where to fill audio from */
	optional<DCPTime> audio_fill_from;
	if (_last_audio_time) {
		/* Fill from the last audio or seek time */
		audio_fill_from = _last_audio_time;
	}

	/* The earliest content's audio is shifted later by its delay() setting, so we
	   must aim that much further ahead when filling silence; otherwise we would
	   stop filling just short of the (delayed) start of its audio and leave a gap.
	*/
	DCPTime audio_fill_towards = fill_towards;
	if (earliest && earliest->content->audio) {
		audio_fill_towards += DCPTime::from_seconds (earliest->content->audio->delay() / 1000.0);
	}

	if (audio_fill_from && audio_fill_from < audio_fill_towards) {
		DCPTimePeriod period (*audio_fill_from, audio_fill_towards);
		/* Fill at most one video frame's worth of audio per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		/* Remove any periods supplied by referenced DCP audio */
		list<DCPTimePeriod> p = subtract(period, _no_audio);
		if (!p.empty ()) {
			fill_audio (p.front());
		}
		/* NOTE(review): filled is set even when the subtraction leaves nothing to
		   fill — presumably so we do not report completion while inside a
		   referenced-audio period; confirm.
		*/
		filled = true;
	}

	if (earliest) {
		earliest->done = earliest->decoder->pass ();
		if (earliest->done && earliest->content->audio) {
			/* Flush the Player audio system for this piece */
			BOOST_FOREACH (AudioStreamPtr i, earliest->content->audio->streams()) {
				audio_flush (earliest, i);
			}
		}
	}

	/* Emit any audio that is ready */

	/* We can safely pull audio up to the earliest point that any active stream
	   has been pushed to.
	*/
	DCPTime pull_to = _playlist->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* There has been an accurate seek and we have received some audio before the seek time;
			   discard it.
			*/
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		}

		if (_last_audio_time) {
			/* Fill any gap between the last audio we emitted and this block */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	return !earliest && !filled;
}
622
623 optional<PositionImage>
624 Player::subtitles_for_frame (DCPTime time) const
625 {
626         list<PositionImage> subtitles;
627
628         BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
629
630                 /* Image subtitles */
631                 list<PositionImage> c = transform_image_subtitles (i.image);
632                 copy (c.begin(), c.end(), back_inserter (subtitles));
633
634                 /* Text subtitles (rendered to an image) */
635                 if (!i.text.empty ()) {
636                         list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
637                         copy (s.begin(), s.end(), back_inserter (subtitles));
638                 }
639         }
640
641         if (subtitles.empty ()) {
642                 return optional<PositionImage> ();
643         }
644
645         return merge (subtitles);
646 }
647
/** Handle some video data arriving from a decoder.
 *  @param wp Piece that the data came from.
 *  @param video The video data.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	/* If the DCP rate means frames are skipped, drop every other one */
	FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
	if (frc.skip && (video.frame % 2) == 1) {
		return;
	}

	/* Time and period of the frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	DCPTimePeriod const period (time, time + one_video_frame());

	/* Discard if it's outside the content's period or if it's before the last accurate seek */
	if (
		time < piece->content->position() ||
		time >= piece->content->end() ||
		(_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted */

	optional<DCPTime> fill_to;
	if (_last_video_time) {
		fill_to = _last_video_time;
	}

	if (fill_to) {
		/* XXX: this may not work for 3D */
		/* Skip periods supplied by referenced DCP video; within the rest, repeat
		   this piece's last frame if we have one, otherwise emit black.
		*/
		BOOST_FOREACH (DCPTimePeriod i, subtract(DCPTimePeriod (*fill_to, time), _no_video)) {
			for (DCPTime j = i.from; j < i.to; j += one_video_frame()) {
				LastVideoMap::const_iterator k = _last_video.find (wp);
				if (k != _last_video.end ()) {
					emit_video (k->second, j);
				} else {
					emit_video (black_player_video_frame(), j);
				}
			}
		}
	}

	/* Remember this frame so that it can be repeated into any future gap */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion ()
			)
		);

	emit_video (_last_video[wp], time);
}
711
712 void
713 Player::audio_flush (shared_ptr<Piece> piece, AudioStreamPtr stream)
714 {
715         shared_ptr<AudioContent> content = piece->content->audio;
716         DCPOMATIC_ASSERT (content);
717
718         shared_ptr<Resampler> r = resampler (content, stream, false);
719         if (!r) {
720                 return;
721         }
722
723         pair<shared_ptr<const AudioBuffers>, Frame> ro = r->flush ();
724         if (ro.first->frames() == 0) {
725                 return;
726         }
727
728         ContentAudio content_audio;
729         content_audio.audio = ro.first;
730         content_audio.frame = ro.second;
731
732         /* Compute time in the DCP */
733         DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
734
735         audio_transform (content, stream, content_audio, time);
736 }
737
/** Do our common processing on some audio: apply gain, remap to the DCP's
 *  channel layout, run any audio processor, fill any gap caused by audio
 *  delay and push the result into the merger.
 *  @param content Audio content the data came from.
 *  @param stream Stream the data came from.
 *  @param content_audio The audio data.
 *  @param time Time in the DCP at which this audio starts.
 */
void
Player::audio_transform (shared_ptr<AudioContent> content, AudioStreamPtr stream, ContentAudio content_audio, DCPTime time)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy so that we do not modify the decoder's buffers */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap */

	shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), content_audio.audio->frames()));
	dcp_mapped->make_silent ();

	/* Mix each input channel into each output channel according to the
	   stream's mapping; zero-gain entries are skipped.
	*/
	AudioMapping map = stream->mapping ();
	for (int i = 0; i < map.input_channels(); ++i) {
		for (int j = 0; j < dcp_mapped->channels(); ++j) {
			if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
				dcp_mapped->accumulate_channel (
					content_audio.audio.get(),
					i,
					static_cast<dcp::Channel> (j),
					map.get (i, static_cast<dcp::Channel> (j))
					);
			}
		}
	}

	content_audio.audio = dcp_mapped;

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Pad any gap which may be caused by audio delay */

	if (_last_audio_time) {
		fill_audio (DCPTimePeriod (*_last_audio_time, time));
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	/* Record how far this stream has now been pushed, so pass() knows how much
	   merged audio may safely be pulled.
	*/
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
791
/** Handle some audio arriving from a decoder: resample it if necessary,
 *  work out its time in the DCP, trim anything which falls outside the
 *  content's period and pass the remainder to audio_transform().
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece that this audio belongs to has gone */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Resample if the stream is not already at the rate required in the DCP */
	if (stream->frame_rate() != content->resampled_frame_rate()) {
		shared_ptr<Resampler> r = resampler (content, stream, true);
		pair<shared_ptr<const AudioBuffers>, Frame> ro = r->run (content_audio.audio, content_audio.frame);
		if (ro.first->frames() == 0) {
			/* No output from the resampler yet; it will come out on a later call or at flush */
			return;
		}
		content_audio.audio = ro.first;
		content_audio.frame = ro.second;
	}

	/* Compute time in the DCP (including the content's audio delay) */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		/* The block starts before the content does; discard the overlapping part */
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end()) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end()) {
		/* The block overlaps the end of the content; keep only the part inside it */
		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	audio_transform (content, stream, content_audio, time);
}
845
846 void
847 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
848 {
849         shared_ptr<Piece> piece = wp.lock ();
850         if (!piece) {
851                 return;
852         }
853
854         /* Apply content's subtitle offsets */
855         subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
856         subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
857
858         /* Apply content's subtitle scale */
859         subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
860         subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
861
862         /* Apply a corrective translation to keep the subtitle centred after that scale */
863         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
864         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
865
866         PlayerSubtitles ps;
867         ps.image.push_back (subtitle.sub);
868         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
869
870         _active_subtitles.add_from (wp, ps, from);
871 }
872
873 void
874 Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
875 {
876         shared_ptr<Piece> piece = wp.lock ();
877         if (!piece) {
878                 return;
879         }
880
881         PlayerSubtitles ps;
882         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
883
884         BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
885                 s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
886                 s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
887                 float const xs = piece->content->subtitle->x_scale();
888                 float const ys = piece->content->subtitle->y_scale();
889                 float size = s.size();
890
891                 /* Adjust size to express the common part of the scaling;
892                    e.g. if xs = ys = 0.5 we scale size by 2.
893                 */
894                 if (xs > 1e-5 && ys > 1e-5) {
895                         size *= 1 / min (1 / xs, 1 / ys);
896                 }
897                 s.set_size (size);
898
899                 /* Then express aspect ratio changes */
900                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
901                         s.set_aspect_adjust (xs / ys);
902                 }
903
904                 s.set_in (dcp::Time(from.seconds(), 1000));
905                 ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
906                 ps.add_fonts (piece->content->subtitle->fonts ());
907         }
908
909         _active_subtitles.add_from (wp, ps, from);
910 }
911
912 void
913 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
914 {
915         if (!_active_subtitles.have (wp)) {
916                 return;
917         }
918
919         shared_ptr<Piece> piece = wp.lock ();
920         if (!piece) {
921                 return;
922         }
923
924         DCPTime const dcp_to = content_time_to_dcp (piece, to);
925
926         pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
927
928         if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
929                 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
930         }
931 }
932
933 void
934 Player::seek (DCPTime time, bool accurate)
935 {
936         if (_audio_processor) {
937                 _audio_processor->flush ();
938         }
939
940         for (ResamplerMap::iterator i = _resamplers.begin(); i != _resamplers.end(); ++i) {
941                 i->second->flush ();
942                 i->second->reset ();
943         }
944
945         _audio_merger.clear ();
946         _active_subtitles.clear ();
947
948         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
949                 if (time < i->content->position()) {
950                         /* Before; seek to 0 */
951                         i->decoder->seek (ContentTime(), accurate);
952                         i->done = false;
953                 } else if (i->content->position() <= time && time < i->content->end()) {
954                         /* During; seek to position */
955                         i->decoder->seek (dcp_to_content_time (i, time), accurate);
956                         i->done = false;
957                 } else {
958                         /* After; this piece is done */
959                         i->done = true;
960                 }
961         }
962
963         if (accurate) {
964                 _last_video_time = time;
965                 _last_audio_time = time;
966         } else {
967                 _last_video_time = optional<DCPTime>();
968                 _last_audio_time = optional<DCPTime>();
969         }
970 }
971
972 shared_ptr<Resampler>
973 Player::resampler (shared_ptr<const AudioContent> content, AudioStreamPtr stream, bool create)
974 {
975         ResamplerMap::const_iterator i = _resamplers.find (make_pair (content, stream));
976         if (i != _resamplers.end ()) {
977                 return i->second;
978         }
979
980         if (!create) {
981                 return shared_ptr<Resampler> ();
982         }
983
984         LOG_GENERAL (
985                 "Creating new resampler from %1 to %2 with %3 channels",
986                 stream->frame_rate(),
987                 content->resampled_frame_rate(),
988                 stream->channels()
989                 );
990
991         shared_ptr<Resampler> r (
992                 new Resampler (stream->frame_rate(), content->resampled_frame_rate(), stream->channels())
993                 );
994
995         _resamplers[make_pair(content, stream)] = r;
996         return r;
997 }
998
999 void
1000 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
1001 {
1002         optional<PositionImage> subtitles = subtitles_for_frame (time);
1003         if (subtitles) {
1004                 pv->set_subtitle (subtitles.get ());
1005         }
1006
1007         Video (pv, time);
1008
1009         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
1010                 _last_video_time = time + one_video_frame();
1011                 _active_subtitles.clear_before (time);
1012         }
1013 }
1014
1015 void
1016 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1017 {
1018         Audio (data, time);
1019         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate ());
1020 }
1021
1022 void
1023 Player::fill_audio (DCPTimePeriod period)
1024 {
1025         if (period.from == period.to) {
1026                 return;
1027         }
1028
1029         BOOST_FOREACH (DCPTimePeriod i, subtract(period, _no_audio)) {
1030                 DCPTime t = i.from;
1031                 while (t < i.to) {
1032                         DCPTime block = min (DCPTime::from_seconds (0.5), i.to - t);
1033                         Frame const samples = block.frames_round(_film->audio_frame_rate());
1034                         if (samples) {
1035                                 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1036                                 silence->make_silent ();
1037                                 emit_audio (silence, t);
1038                         }
1039                         t += block;
1040                 }
1041         }
1042 }
1043
/** @return the length of one video frame at the film's video frame rate */
DCPTime
Player::one_video_frame () const
{
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}
1049
1050 pair<shared_ptr<AudioBuffers>, DCPTime>
1051 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1052 {
1053         DCPTime const discard_time = discard_to - time;
1054         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1055         Frame remaining_frames = audio->frames() - discard_frames;
1056         if (remaining_frames <= 0) {
1057                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1058         }
1059         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1060         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1061         return make_pair(cut, time + discard_time);
1062 }