Fill audio in the Player.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_subtitles.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "resampler.h"
51 #include "compose.hpp"
52 #include <dcp/reel.h>
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <boost/foreach.hpp>
57 #include <stdint.h>
58 #include <algorithm>
59 #include <iostream>
60
61 #include "i18n.h"
62
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
64
65 using std::list;
66 using std::cout;
67 using std::min;
68 using std::max;
69 using std::min;
70 using std::vector;
71 using std::pair;
72 using std::map;
73 using std::make_pair;
74 using std::copy;
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
80
/** Construct a Player to play a Film's Playlist.
 *  @param film Film to play.
 *  @param playlist Playlist to play (normally the film's own playlist).
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _have_valid_pieces (false)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _always_burn_subtitles (false)
	, _fast (false)
	, _play_referenced (false)
	, _last_seek_accurate (true)
	, _audio_merger (_film->audio_frame_rate())
{
	/* Watch for changes to the film and playlist which would alter our output */
	_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
	_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
	_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
	set_video_container_size (_film->frame_size ());

	/* Set up _audio_processor, if the film requires one */
	film_changed (Film::AUDIO_PROCESSOR);

	/* Start decoding from the beginning, with an accurate seek */
	seek (DCPTime (), true);
}
102
103 void
104 Player::setup_pieces ()
105 {
106         _pieces.clear ();
107
108         BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
109
110                 if (!i->paths_valid ()) {
111                         continue;
112                 }
113
114                 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log());
115                 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
116
117                 if (!decoder) {
118                         /* Not something that we can decode; e.g. Atmos content */
119                         continue;
120                 }
121
122                 if (decoder->video && _ignore_video) {
123                         decoder->video->set_ignore ();
124                 }
125
126                 if (decoder->audio && _ignore_audio) {
127                         decoder->audio->set_ignore ();
128                 }
129
130                 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
131                 if (dcp && _play_referenced) {
132                         dcp->set_decode_referenced ();
133                 }
134
135                 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
136                 _pieces.push_back (piece);
137
138                 if (decoder->video) {
139                         decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
140                 }
141
142                 if (decoder->audio) {
143                         decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
144                 }
145
146                 if (decoder->subtitle) {
147                         decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
148                         decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
149                         decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
150                 }
151         }
152
153         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
154                 if (i->content->audio) {
155                         BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
156                                 _stream_states[j] = StreamState (i, i->content->position ());
157                         }
158                 }
159         }
160
161         if (!_play_referenced) {
162                 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
163                         shared_ptr<DCPContent> dc = dynamic_pointer_cast<DCPContent> (i->content);
164                         if (dc) {
165                                 if (dc->reference_video()) {
166                                         _no_video.push_back (DCPTimePeriod (dc->position(), dc->end()));
167                                 }
168                                 if (dc->reference_audio()) {
169                                         _no_audio.push_back (DCPTimePeriod (dc->position(), dc->end()));
170                                 }
171                         }
172                 }
173         }
174
175         _last_video_time = optional<DCPTime> ();
176         _last_audio_time = optional<DCPTime> ();
177         _have_valid_pieces = true;
178 }
179
180 void
181 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
182 {
183         shared_ptr<Content> c = w.lock ();
184         if (!c) {
185                 return;
186         }
187
188         if (
189                 property == ContentProperty::POSITION ||
190                 property == ContentProperty::LENGTH ||
191                 property == ContentProperty::TRIM_START ||
192                 property == ContentProperty::TRIM_END ||
193                 property == ContentProperty::PATH ||
194                 property == VideoContentProperty::FRAME_TYPE ||
195                 property == DCPContentProperty::NEEDS_ASSETS ||
196                 property == DCPContentProperty::NEEDS_KDM ||
197                 property == SubtitleContentProperty::COLOUR ||
198                 property == SubtitleContentProperty::OUTLINE ||
199                 property == SubtitleContentProperty::SHADOW ||
200                 property == SubtitleContentProperty::EFFECT_COLOUR ||
201                 property == FFmpegContentProperty::SUBTITLE_STREAM ||
202                 property == VideoContentProperty::COLOUR_CONVERSION
203                 ) {
204
205                 _have_valid_pieces = false;
206                 Changed (frequent);
207
208         } else if (
209                 property == SubtitleContentProperty::LINE_SPACING ||
210                 property == SubtitleContentProperty::OUTLINE_WIDTH ||
211                 property == SubtitleContentProperty::Y_SCALE ||
212                 property == SubtitleContentProperty::FADE_IN ||
213                 property == SubtitleContentProperty::FADE_OUT ||
214                 property == ContentProperty::VIDEO_FRAME_RATE ||
215                 property == SubtitleContentProperty::USE ||
216                 property == SubtitleContentProperty::X_OFFSET ||
217                 property == SubtitleContentProperty::Y_OFFSET ||
218                 property == SubtitleContentProperty::X_SCALE ||
219                 property == SubtitleContentProperty::FONTS ||
220                 property == VideoContentProperty::CROP ||
221                 property == VideoContentProperty::SCALE ||
222                 property == VideoContentProperty::FADE_IN ||
223                 property == VideoContentProperty::FADE_OUT
224                 ) {
225
226                 Changed (frequent);
227         }
228 }
229
230 void
231 Player::set_video_container_size (dcp::Size s)
232 {
233         if (s == _video_container_size) {
234                 return;
235         }
236
237         _video_container_size = s;
238
239         _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
240         _black_image->make_black ();
241
242         Changed (false);
243 }
244
/** Handler for a wholesale change to the playlist: our pieces are out of date
 *  and our output would now be different.
 */
void
Player::playlist_changed ()
{
	_have_valid_pieces = false;
	Changed (false);
}
251
252 void
253 Player::film_changed (Film::Property p)
254 {
255         /* Here we should notice Film properties that affect our output, and
256            alert listeners that our output now would be different to how it was
257            last time we were run.
258         */
259
260         if (p == Film::CONTAINER) {
261                 Changed (false);
262         } else if (p == Film::VIDEO_FRAME_RATE) {
263                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
264                    so we need new pieces here.
265                 */
266                 _have_valid_pieces = false;
267                 Changed (false);
268         } else if (p == Film::AUDIO_PROCESSOR) {
269                 if (_film->audio_processor ()) {
270                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
271                 }
272         }
273 }
274
275 list<PositionImage>
276 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
277 {
278         list<PositionImage> all;
279
280         for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
281                 if (!i->image) {
282                         continue;
283                 }
284
285                 /* We will scale the subtitle up to fit _video_container_size */
286                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
287
288                 /* Then we need a corrective translation, consisting of two parts:
289                  *
290                  * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
291                  *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
292                  *
293                  * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
294                  *     (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
295                  *     (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
296                  *
297                  * Combining these two translations gives these expressions.
298                  */
299
300                 all.push_back (
301                         PositionImage (
302                                 i->image->scale (
303                                         scaled_size,
304                                         dcp::YUV_TO_RGB_REC601,
305                                         i->image->pixel_format (),
306                                         true,
307                                         _fast
308                                         ),
309                                 Position<int> (
310                                         lrint (_video_container_size.width * i->rectangle.x),
311                                         lrint (_video_container_size.height * i->rectangle.y)
312                                         )
313                                 )
314                         );
315         }
316
317         return all;
318 }
319
320 shared_ptr<PlayerVideo>
321 Player::black_player_video_frame () const
322 {
323         return shared_ptr<PlayerVideo> (
324                 new PlayerVideo (
325                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
326                         Crop (),
327                         optional<double> (),
328                         _video_container_size,
329                         _video_container_size,
330                         EYES_BOTH,
331                         PART_WHOLE,
332                         PresetColourConversion::all().front().conversion
333                 )
334         );
335 }
336
337 Frame
338 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
339 {
340         DCPTime s = t - piece->content->position ();
341         s = min (piece->content->length_after_trim(), s);
342         s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
343
344         /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
345            then convert that ContentTime to frames at the content's rate.  However this fails for
346            situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
347            enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
348
349            Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
350         */
351         return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
352 }
353
354 DCPTime
355 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
356 {
357         /* See comment in dcp_to_content_video */
358         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
359         return max (DCPTime (), d + piece->content->position ());
360 }
361
362 Frame
363 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
364 {
365         DCPTime s = t - piece->content->position ();
366         s = min (piece->content->length_after_trim(), s);
367         /* See notes in dcp_to_content_video */
368         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
369 }
370
371 DCPTime
372 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
373 {
374         /* See comment in dcp_to_content_video */
375         DCPTime const d = DCPTime::from_frames (f, _film->audio_frame_rate()) - DCPTime (piece->content->trim_start(), piece->frc);
376         return max (DCPTime (), d + piece->content->position ());
377 }
378
379 ContentTime
380 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
381 {
382         DCPTime s = t - piece->content->position ();
383         s = min (piece->content->length_after_trim(), s);
384         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
385 }
386
/** @return the DCP time corresponding to ContentTime t within the given piece,
 *  clamped to zero.
 */
DCPTime
Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
{
	return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
}
392
393 list<shared_ptr<Font> >
394 Player::get_subtitle_fonts ()
395 {
396         if (!_have_valid_pieces) {
397                 setup_pieces ();
398         }
399
400         list<shared_ptr<Font> > fonts;
401         BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
402                 if (p->content->subtitle) {
403                         /* XXX: things may go wrong if there are duplicate font IDs
404                            with different font files.
405                         */
406                         list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
407                         copy (f.begin(), f.end(), back_inserter (fonts));
408                 }
409         }
410
411         return fonts;
412 }
413
/** Set this player never to produce any video data.
 *  NOTE(review): unlike set_fast(), this does not invalidate existing pieces,
 *  so it presumably must be called before the first pass()/setup — confirm.
 */
void
Player::set_ignore_video ()
{
	_ignore_video = true;
}
420
/** Set whether or not this player should always burn text subtitles into the image,
 *  regardless of the content settings.
 *  @param burn true to always burn subtitles, false to obey content settings.
 *  Used by subtitles_for_frame() when deciding what to render.
 */
void
Player::set_always_burn_subtitles (bool burn)
{
	_always_burn_subtitles = burn;
}
430
431 void
432 Player::set_fast ()
433 {
434         _fast = true;
435         _have_valid_pieces = false;
436 }
437
438 void
439 Player::set_play_referenced ()
440 {
441         _play_referenced = true;
442         _have_valid_pieces = false;
443 }
444
/** @return reel assets (picture / sound / subtitle) from any DCP content which
 *  is marked to be referenced directly rather than re-encoded, along with the
 *  period of the output DCP that each asset covers.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
		/* Only DCP content can be referenced */
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (j, _film->log()));
		} catch (...) {
			/* Could not make a decoder for this DCP (e.g. missing assets);
			   give up and return whatever we have found so far.
			*/
			return a;
		}

		/* Offset, in frames, of the current reel from the start of this content */
		int64_t offset = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			DCPOMATIC_ASSERT (j->video_frame_rate ());
			/* Content video frame rate, used to turn the trims into frame counts */
			double const cfr = j->video_frame_rate().get();
			Frame const trim_start = j->trim_start().frames_round (cfr);
			Frame const trim_end = j->trim_end().frames_round (cfr);
			int const ffr = _film->video_frame_rate ();

			/* Where this reel starts in the output DCP */
			DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());

			/* NOTE(review): trim_start/trim_end are applied to the assets of
			   every reel, not just the first/last one — confirm this is the
			   intended behaviour for multi-reel DCPs.
			*/
			if (j->reference_video ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
				DCPOMATIC_ASSERT (ra);
				/* Apply the content's trims to the asset's entry point and duration */
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_audio ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_subtitle ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			/* Assume that main picture duration is the length of the reel */
			offset += k->main_picture()->duration ();
		}
	}

	return a;
}
510
/** Run one pass of the player: let the earliest decoder produce some more
 *  data, fill any gaps with black / silence, and emit whatever audio is ready.
 *  @return true if nothing was decoded and no filling was needed (i.e. we are
 *  done), otherwise false.
 */
bool
Player::pass ()
{
	if (!_have_valid_pieces) {
		setup_pieces ();
	}

	/* Find the piece which is earliest in DCP time; that is the one which
	   should decode next.
	*/
	shared_ptr<Piece> earliest;
	DCPTime earliest_content;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (!i->done) {
			DCPTime const t = i->content->position() + DCPTime (i->decoder->position(), i->frc);
			if (!earliest || t < earliest_content) {
				earliest_content = t;
				earliest = i;
			}
		}
	}

	if (earliest) {
		earliest->done = earliest->decoder->pass ();
		if (earliest->done && earliest->content->audio) {
			/* Flush the Player audio system for this piece */
			BOOST_FOREACH (AudioStreamPtr i, earliest->content->audio->streams()) {
				audio_flush (earliest, i);
			}
		}
	}

	/* Fill towards the next thing that might happen (or the end of the playlist).  This is to fill gaps between content,
	   NOT to fill gaps within content (the latter is done in ::video())
	*/
	DCPTime fill_towards = earliest ? earliest_content : _playlist->length();

	/* Work out where to fill video from */
	optional<DCPTime> video_fill_from;
	if (_last_video_time && !_playlist->video_content_at(_last_video_time.get())) {
		/* No seek; fill towards the next thing that might happen (or the end of the playlist) */
		video_fill_from = _last_video_time;
	} else if (_last_seek_time && !_playlist->video_content_at(_last_seek_time.get())) {
		/* Seek into an empty area; fill from the seek time */
		video_fill_from = _last_seek_time;
	}

	/* Whether this pass emitted any fill (black video or silence) */
	bool filled = false;

	if (video_fill_from && ((fill_towards - video_fill_from.get())) > one_video_frame()) {
		emit_video (black_player_video_frame(), video_fill_from.get());
		filled = true;
	} else if (_playlist->length() == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(), DCPTime());
		filled = true;
	}

	/* Work out where to fill audio from, in the same way as for video */
	optional<DCPTime> audio_fill_from;
	if (_last_audio_time && !_playlist->audio_content_at(_last_audio_time.get())) {
		/* No seek; fill from the last thing that happened */
		audio_fill_from = _last_audio_time;
	} else if (_last_seek_time && !_playlist->audio_content_at(_last_seek_time.get())) {
		/* Seek into an empty area; fill from the seek time */
		audio_fill_from = _last_seek_time;
	}

	if (audio_fill_from && audio_fill_from < fill_towards) {
		/* Fill at most one video frame's worth of silence per pass */
		DCPTimePeriod period (audio_fill_from.get(), fill_towards);
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		filled = true;
	}

	if (!earliest && !filled) {
		return true;
	}

	/* Emit any audio that is ready */

	/* Only pull audio up to the point where every active stream has pushed,
	   so that the merger has complete data for the emitted period.
	*/
	DCPTime pull_from = _playlist->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_from) {
			pull_from = i->second.last_push_end;
		}
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_from);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < _last_audio_time.get()) {
			/* There has been an accurate seek and we have received some audio before the seek time;
			   discard it.
			*/
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		}

		if (_last_audio_time) {
			/* Fill any gap between the last emitted audio and this block */
			fill_audio (DCPTimePeriod (_last_audio_time.get(), i->second));
		}

		emit_audio (i->first, i->second);
	}

	return false;
}
620
621 optional<PositionImage>
622 Player::subtitles_for_frame (DCPTime time) const
623 {
624         list<PositionImage> subtitles;
625
626         BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
627
628                 /* Image subtitles */
629                 list<PositionImage> c = transform_image_subtitles (i.image);
630                 copy (c.begin(), c.end(), back_inserter (subtitles));
631
632                 /* Text subtitles (rendered to an image) */
633                 if (!i.text.empty ()) {
634                         list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
635                         copy (s.begin(), s.end(), back_inserter (subtitles));
636                 }
637         }
638
639         if (subtitles.empty ()) {
640                 return optional<PositionImage> ();
641         }
642
643         return merge (subtitles);
644 }
645
/** Handler for a frame of video arriving from a decoder.
 *  @param wp Piece that the video arrived from.
 *  @param video Video frame.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
	if (frc.skip && (video.frame % 2) == 1) {
		/* We are skipping every other frame to reduce the rate; drop odd frames */
		return;
	}

	/* Time and period of the frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	DCPTimePeriod const period (time, time + one_video_frame());

	/* Discard if it's outside the content's period or if it's before the last accurate seek */
	if (
		time < piece->content->position() ||
		time >= piece->content->end() ||
		(_last_seek_time && _last_seek_accurate && time < _last_seek_time.get())) {
		return;
	}

	/* Fill gaps caused by (the hopefully rare event of) a decoder not emitting contiguous video.  We have to do this here
	   as in the problematic case we are about to emit a frame which is not contiguous with the previous.
	*/

	if (_last_video_time) {
		/* XXX: this may not work for 3D */
		/* Fill from the last video up to this frame, skipping periods which are
		   covered by referenced DCPs (_no_video).  Repeat the last frame if we
		   have one, otherwise use black.
		*/
		BOOST_FOREACH (DCPTimePeriod i, subtract(DCPTimePeriod (_last_video_time.get(), time), _no_video)) {
			for (DCPTime j = i.from; j < i.to; j += one_video_frame()) {
				if (_last_video) {
					emit_video (shared_ptr<PlayerVideo> (new PlayerVideo (*_last_video)), j);
				} else {
					emit_video (black_player_video_frame(), j);
				}
			}
		}
	}

	/* Keep this frame so that it can be repeated for gap-filling later */
	_last_video.reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion ()
			)
		);

	emit_video (_last_video, time);
}
705
706 void
707 Player::audio_flush (shared_ptr<Piece> piece, AudioStreamPtr stream)
708 {
709         shared_ptr<AudioContent> content = piece->content->audio;
710         DCPOMATIC_ASSERT (content);
711
712         shared_ptr<Resampler> r = resampler (content, stream, false);
713         if (!r) {
714                 return;
715         }
716
717         pair<shared_ptr<const AudioBuffers>, Frame> ro = r->flush ();
718         if (ro.first->frames() == 0) {
719                 return;
720         }
721
722         ContentAudio content_audio;
723         content_audio.audio = ro.first;
724         content_audio.frame = ro.second;
725
726         /* Compute time in the DCP */
727         DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
728
729         audio_transform (content, stream, content_audio, time);
730 }
731
/** Do our common processing on some audio: apply gain, remap the channels to
 *  the DCP's layout, run any audio processor and push the result into the
 *  merger, updating the stream's state.
 *  @param content Audio content the data came from.
 *  @param stream Stream the data came from.
 *  @param content_audio The audio data (must be non-empty).
 *  @param time Time of the first frame in the DCP.
 */
void
Player::audio_transform (shared_ptr<AudioContent> content, AudioStreamPtr stream, ContentAudio content_audio, DCPTime time)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy the buffers before applying gain so that the decoder's data
		   is not modified.
		*/
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap */

	shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), content_audio.audio->frames()));
	dcp_mapped->make_silent ();

	/* Mix each input channel into the output channels according to the
	   stream's mapping; gains of zero are skipped.
	*/
	AudioMapping map = stream->mapping ();
	for (int i = 0; i < map.input_channels(); ++i) {
		for (int j = 0; j < dcp_mapped->channels(); ++j) {
			if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
				dcp_mapped->accumulate_channel (
					content_audio.audio.get(),
					i,
					static_cast<dcp::Channel> (j),
					map.get (i, static_cast<dcp::Channel> (j))
					);
			}
		}
	}

	content_audio.audio = dcp_mapped;

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	/* Record how far this stream has now been pushed, so that pass() knows how
	   much merged audio is complete and can be emitted.
	*/
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
779
/** Handle some audio arriving from a decoder: resample it if the stream is
 *  not at the content's target rate, trim anything that falls outside the
 *  content's period in the DCP, then hand it to audio_transform() for
 *  gain/remap/processing.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away; nothing to do */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Resample */
	if (stream->frame_rate() != content->resampled_frame_rate()) {
		shared_ptr<Resampler> r = resampler (content, stream, true);
		pair<shared_ptr<const AudioBuffers>, Frame> ro = r->run (content_audio.audio, content_audio.frame);
		if (ro.first->frames() == 0) {
			/* The resampler needs more input before it can produce any output */
			return;
		}
		content_audio.audio = ro.first;
		content_audio.frame = ro.second;
	}

	/* Compute time in the DCP, allowing for the content's audio delay (in ms) */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end()) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end()) {
		/* The block overlaps the end of the content; keep only the part before the end */
		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	audio_transform (content, stream, content_audio, time);
}
833
834 void
835 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
836 {
837         shared_ptr<Piece> piece = wp.lock ();
838         if (!piece) {
839                 return;
840         }
841
842         /* Apply content's subtitle offsets */
843         subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
844         subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
845
846         /* Apply content's subtitle scale */
847         subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
848         subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
849
850         /* Apply a corrective translation to keep the subtitle centred after that scale */
851         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
852         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
853
854         PlayerSubtitles ps;
855         ps.image.push_back (subtitle.sub);
856         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
857
858         _active_subtitles.add_from (wp, ps, from);
859 }
860
/** Handle the start of a text subtitle: apply the content's position, scale
 *  and outline settings to each dcp::SubtitleString, then register the
 *  result as active.
 */
void
Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	PlayerSubtitles ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		/* Apply the content's subtitle offsets */
		s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
		s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
		float const xs = piece->content->subtitle->x_scale();
		float const ys = piece->content->subtitle->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling:
		   1 / min (1 / xs, 1 / ys) is max (xs, ys), so e.g. if
		   xs = ys = 0.5 the size is halved.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes, when the two scales differ enough */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* Stamp the subtitle with its DCP start time (millisecond precision) */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
		ps.add_fonts (piece->content->subtitle->fonts ());
	}

	_active_subtitles.add_from (wp, ps, from);
}
899
900 void
901 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
902 {
903         if (!_active_subtitles.have (wp)) {
904                 return;
905         }
906
907         shared_ptr<Piece> piece = wp.lock ();
908         if (!piece) {
909                 return;
910         }
911
912         DCPTime const dcp_to = content_time_to_dcp (piece, to);
913
914         pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
915
916         if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
917                 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
918         }
919 }
920
921 void
922 Player::seek (DCPTime time, bool accurate)
923 {
924         if (_audio_processor) {
925                 _audio_processor->flush ();
926         }
927
928         for (ResamplerMap::iterator i = _resamplers.begin(); i != _resamplers.end(); ++i) {
929                 i->second->flush ();
930                 i->second->reset ();
931         }
932
933         _audio_merger.clear ();
934         _active_subtitles.clear ();
935
936         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
937                 if (time < i->content->position()) {
938                         /* Before; seek to 0 */
939                         i->decoder->seek (ContentTime(), accurate);
940                         i->done = false;
941                 } else if (i->content->position() <= time && time < i->content->end()) {
942                         /* During; seek to position */
943                         i->decoder->seek (dcp_to_content_time (i, time), accurate);
944                         i->done = false;
945                 } else {
946                         /* After; this piece is done */
947                         i->done = true;
948                 }
949         }
950
951         _last_video_time = optional<DCPTime> ();
952         _last_audio_time = optional<DCPTime> ();
953         _last_seek_time = time;
954         _last_seek_accurate = accurate;
955 }
956
957 shared_ptr<Resampler>
958 Player::resampler (shared_ptr<const AudioContent> content, AudioStreamPtr stream, bool create)
959 {
960         ResamplerMap::const_iterator i = _resamplers.find (make_pair (content, stream));
961         if (i != _resamplers.end ()) {
962                 return i->second;
963         }
964
965         if (!create) {
966                 return shared_ptr<Resampler> ();
967         }
968
969         LOG_GENERAL (
970                 "Creating new resampler from %1 to %2 with %3 channels",
971                 stream->frame_rate(),
972                 content->resampled_frame_rate(),
973                 stream->channels()
974                 );
975
976         shared_ptr<Resampler> r (
977                 new Resampler (stream->frame_rate(), content->resampled_frame_rate(), stream->channels())
978                 );
979
980         _resamplers[make_pair(content, stream)] = r;
981         return r;
982 }
983
984 void
985 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
986 {
987         optional<PositionImage> subtitles = subtitles_for_frame (time);
988         if (subtitles) {
989                 pv->set_subtitle (subtitles.get ());
990         }
991         Video (pv, time);
992         _last_video_time = time + one_video_frame();
993         _active_subtitles.clear_before (time);
994 }
995
/** Emit some audio via the Audio signal and note the DCP time at which
 *  this data finishes, ready for the next emission.
 */
void
Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
{
	Audio (data, time);
	_last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate ());
}
1002
1003 void
1004 Player::fill_audio (DCPTimePeriod period)
1005 {
1006         BOOST_FOREACH (DCPTimePeriod i, subtract(period, _no_audio)) {
1007                 DCPTime t = i.from;
1008                 while (t < i.to) {
1009                         DCPTime block = min (DCPTime::from_seconds (0.5), i.to - t);
1010                         Frame const samples = block.frames_round(_film->audio_frame_rate());
1011                         if (samples) {
1012                                 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1013                                 silence->make_silent ();
1014                                 emit_audio (silence, t);
1015                         }
1016                         t += block;
1017                 }
1018         }
1019 }
1020
/** @return The DCP duration of one video frame at the film's video frame rate */
DCPTime
Player::one_video_frame () const
{
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}
1026
1027 pair<shared_ptr<AudioBuffers>, DCPTime>
1028 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1029 {
1030         DCPTime const discard_time = discard_to - time;
1031         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1032         Frame remaining_frames = audio->frames() - discard_frames;
1033         if (remaining_frames <= 0) {
1034                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1035         }
1036         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1037         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1038         return make_pair(cut, time + discard_time);
1039 }