Fix missing pad of gaps caused by delays in audio content at the start of a Film...
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_subtitles.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "resampler.h"
51 #include "compose.hpp"
52 #include <dcp/reel.h>
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <boost/foreach.hpp>
57 #include <stdint.h>
58 #include <algorithm>
59 #include <iostream>
60
61 #include "i18n.h"
62
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
64
65 using std::list;
66 using std::cout;
67 using std::min;
68 using std::max;
69 using std::min;
70 using std::vector;
71 using std::pair;
72 using std::map;
73 using std::make_pair;
74 using std::copy;
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
80
/** Construct a Player to play back a Film's content.
 *  @param film Film that we are playing.
 *  @param playlist Playlist to play (normally the film's own playlist).
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _have_valid_pieces (false)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _always_burn_subtitles (false)
	, _fast (false)
	, _play_referenced (false)
	, _last_seek_accurate (true)
	, _audio_merger (_film->audio_frame_rate())
{
	/* Watch the film and playlist for changes so that we can tell our
	   listeners when our output would change.
	*/
	_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
	_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
	_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor (if any); see film_changed */
	film_changed (Film::AUDIO_PROCESSOR);

	/* Start at the beginning; the `true' appears to request an accurate seek
	   (see _last_seek_accurate) -- confirm against Player::seek.
	*/
	seek (DCPTime (), true);
}
102
/** Rebuild _pieces from the playlist: one Piece per piece of content, each with
 *  a decoder whose outputs are connected back to this Player.  Also resets the
 *  per-stream audio state and the note of periods covered by referenced DCPs.
 */
void
Player::setup_pieces ()
{
	_pieces.clear ();

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

		if (!i->paths_valid ()) {
			/* Content whose files are missing can't be decoded */
			continue;
		}

		shared_ptr<Decoder> decoder = decoder_factory (i, _film->log());
		FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());

		if (!decoder) {
			/* Not something that we can decode; e.g. Atmos content */
			continue;
		}

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore ();
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore ();
		}

		/* If we are playing referenced content the DCP decoder must actually decode it too */
		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp && _play_referenced) {
			dcp->set_decode_referenced ();
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		/* Forward decoder output to our handlers; the Piece is held weakly so
		   that a still-live connection cannot keep it alive.
		*/
		if (decoder->video) {
			decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		if (decoder->subtitle) {
			decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
			decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
			decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
		}
	}

	/* Set up a StreamState for every audio stream, starting at its content's position */
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Note the periods during which a referenced DCP will supply video/audio,
	   so that we don't fill those periods with black/silence ourselves.
	*/
	if (!_play_referenced) {
		BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
			shared_ptr<DCPContent> dc = dynamic_pointer_cast<DCPContent> (i->content);
			if (dc) {
				if (dc->reference_video()) {
					_no_video.push_back (DCPTimePeriod (dc->position(), dc->end()));
				}
				if (dc->reference_audio()) {
					_no_audio.push_back (DCPTimePeriod (dc->position(), dc->end()));
				}
			}
		}
	}

	/* We have emitted nothing yet */
	_last_video_time = optional<DCPTime> ();
	_last_audio_time = optional<DCPTime> ();
	_have_valid_pieces = true;
}
179
180 void
181 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
182 {
183         shared_ptr<Content> c = w.lock ();
184         if (!c) {
185                 return;
186         }
187
188         if (
189                 property == ContentProperty::POSITION ||
190                 property == ContentProperty::LENGTH ||
191                 property == ContentProperty::TRIM_START ||
192                 property == ContentProperty::TRIM_END ||
193                 property == ContentProperty::PATH ||
194                 property == VideoContentProperty::FRAME_TYPE ||
195                 property == DCPContentProperty::NEEDS_ASSETS ||
196                 property == DCPContentProperty::NEEDS_KDM ||
197                 property == SubtitleContentProperty::COLOUR ||
198                 property == SubtitleContentProperty::OUTLINE ||
199                 property == SubtitleContentProperty::SHADOW ||
200                 property == SubtitleContentProperty::EFFECT_COLOUR ||
201                 property == FFmpegContentProperty::SUBTITLE_STREAM ||
202                 property == VideoContentProperty::COLOUR_CONVERSION
203                 ) {
204
205                 _have_valid_pieces = false;
206                 Changed (frequent);
207
208         } else if (
209                 property == SubtitleContentProperty::LINE_SPACING ||
210                 property == SubtitleContentProperty::OUTLINE_WIDTH ||
211                 property == SubtitleContentProperty::Y_SCALE ||
212                 property == SubtitleContentProperty::FADE_IN ||
213                 property == SubtitleContentProperty::FADE_OUT ||
214                 property == ContentProperty::VIDEO_FRAME_RATE ||
215                 property == SubtitleContentProperty::USE ||
216                 property == SubtitleContentProperty::X_OFFSET ||
217                 property == SubtitleContentProperty::Y_OFFSET ||
218                 property == SubtitleContentProperty::X_SCALE ||
219                 property == SubtitleContentProperty::FONTS ||
220                 property == VideoContentProperty::CROP ||
221                 property == VideoContentProperty::SCALE ||
222                 property == VideoContentProperty::FADE_IN ||
223                 property == VideoContentProperty::FADE_OUT
224                 ) {
225
226                 Changed (frequent);
227         }
228 }
229
/** Set the size of the "container" that video will be played back in,
 *  remaking the black filler frame to match and telling listeners that
 *  our output has changed.
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	/* Do nothing if the size is unchanged */
	if (s == _video_container_size) {
		return;
	}

	_video_container_size = s;

	/* Remake the black image used to fill gaps, at the new size */
	_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
	_black_image->make_black ();

	Changed (false);
}
244
/** Handler for the playlist's Changed signal: our pieces are now out of date,
 *  and our output would be different from last time.
 */
void
Player::playlist_changed ()
{
	_have_valid_pieces = false;
	Changed (false);
}
251
252 void
253 Player::film_changed (Film::Property p)
254 {
255         /* Here we should notice Film properties that affect our output, and
256            alert listeners that our output now would be different to how it was
257            last time we were run.
258         */
259
260         if (p == Film::CONTAINER) {
261                 Changed (false);
262         } else if (p == Film::VIDEO_FRAME_RATE) {
263                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
264                    so we need new pieces here.
265                 */
266                 _have_valid_pieces = false;
267                 Changed (false);
268         } else if (p == Film::AUDIO_PROCESSOR) {
269                 if (_film->audio_processor ()) {
270                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
271                 }
272         }
273 }
274
275 list<PositionImage>
276 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
277 {
278         list<PositionImage> all;
279
280         for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
281                 if (!i->image) {
282                         continue;
283                 }
284
285                 /* We will scale the subtitle up to fit _video_container_size */
286                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
287
288                 /* Then we need a corrective translation, consisting of two parts:
289                  *
290                  * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
291                  *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
292                  *
293                  * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
294                  *     (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
295                  *     (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
296                  *
297                  * Combining these two translations gives these expressions.
298                  */
299
300                 all.push_back (
301                         PositionImage (
302                                 i->image->scale (
303                                         scaled_size,
304                                         dcp::YUV_TO_RGB_REC601,
305                                         i->image->pixel_format (),
306                                         true,
307                                         _fast
308                                         ),
309                                 Position<int> (
310                                         lrint (_video_container_size.width * i->rectangle.x),
311                                         lrint (_video_container_size.height * i->rectangle.y)
312                                         )
313                                 )
314                         );
315         }
316
317         return all;
318 }
319
320 shared_ptr<PlayerVideo>
321 Player::black_player_video_frame () const
322 {
323         return shared_ptr<PlayerVideo> (
324                 new PlayerVideo (
325                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
326                         Crop (),
327                         optional<double> (),
328                         _video_container_size,
329                         _video_container_size,
330                         EYES_BOTH,
331                         PART_WHOLE,
332                         PresetColourConversion::all().front().conversion
333                 )
334         );
335 }
336
/** Convert a DCP time to a video frame index within a piece of content.
 *  @param piece Piece that the time falls within.
 *  @param t Time in the DCP.
 *  @return Frame index within the content, accounting for its position, trim
 *  and frame-rate change.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	/* Offset into the content, clamped to its trimmed length, then shifted
	   by the trim so that it is a time within the source material.
	*/
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
353
354 DCPTime
355 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
356 {
357         /* See comment in dcp_to_content_video */
358         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
359         return max (DCPTime (), d + piece->content->position ());
360 }
361
362 Frame
363 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
364 {
365         DCPTime s = t - piece->content->position ();
366         s = min (piece->content->length_after_trim(), s);
367         /* See notes in dcp_to_content_video */
368         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
369 }
370
371 DCPTime
372 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
373 {
374         /* See comment in dcp_to_content_video */
375         DCPTime const d = DCPTime::from_frames (f, _film->audio_frame_rate()) - DCPTime (piece->content->trim_start(), piece->frc);
376         return max (DCPTime (), d + piece->content->position ());
377 }
378
379 ContentTime
380 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
381 {
382         DCPTime s = t - piece->content->position ();
383         s = min (piece->content->length_after_trim(), s);
384         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
385 }
386
387 DCPTime
388 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
389 {
390         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
391 }
392
393 list<shared_ptr<Font> >
394 Player::get_subtitle_fonts ()
395 {
396         if (!_have_valid_pieces) {
397                 setup_pieces ();
398         }
399
400         list<shared_ptr<Font> > fonts;
401         BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
402                 if (p->content->subtitle) {
403                         /* XXX: things may go wrong if there are duplicate font IDs
404                            with different font files.
405                         */
406                         list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
407                         copy (f.begin(), f.end(), back_inserter (fonts));
408                 }
409         }
410
411         return fonts;
412 }
413
414 /** Set this player never to produce any video data */
415 void
416 Player::set_ignore_video ()
417 {
418         _ignore_video = true;
419 }
420
/** Set whether or not this player should always burn text subtitles into the image,
 *  regardless of the content settings.
 *  @param burn true to always burn subtitles, false to obey content settings.
 *
 *  Takes effect immediately: the flag is read each time subtitles_for_frame()
 *  runs, so no rebuild of the pieces is needed.
 */
void
Player::set_always_burn_subtitles (bool burn)
{
	_always_burn_subtitles = burn;
}
430
/** Put the player into `fast' mode.  _fast selects faster (lower-quality) image
 *  scaling in transform_image_subtitles(); the pieces are also invalidated so
 *  they are rebuilt on the next pass (presumably something during setup reads
 *  _fast too -- not visible in this file, confirm).
 */
void
Player::set_fast ()
{
	_fast = true;
	_have_valid_pieces = false;
}
437
/** Make the player produce output even for content which would normally be
 *  referenced directly from an existing DCP.  The flag is acted on in
 *  setup_pieces(), where it makes DCP decoders decode referenced assets and
 *  stops the _no_video/_no_audio periods being recorded, so the pieces must
 *  be rebuilt.
 */
void
Player::set_play_referenced ()
{
	_play_referenced = true;
	_have_valid_pieces = false;
}
444
445 list<ReferencedReelAsset>
446 Player::get_reel_assets ()
447 {
448         list<ReferencedReelAsset> a;
449
450         BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
451                 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
452                 if (!j) {
453                         continue;
454                 }
455
456                 scoped_ptr<DCPDecoder> decoder;
457                 try {
458                         decoder.reset (new DCPDecoder (j, _film->log()));
459                 } catch (...) {
460                         return a;
461                 }
462
463                 int64_t offset = 0;
464                 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
465
466                         DCPOMATIC_ASSERT (j->video_frame_rate ());
467                         double const cfr = j->video_frame_rate().get();
468                         Frame const trim_start = j->trim_start().frames_round (cfr);
469                         Frame const trim_end = j->trim_end().frames_round (cfr);
470                         int const ffr = _film->video_frame_rate ();
471
472                         DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
473                         if (j->reference_video ()) {
474                                 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
475                                 DCPOMATIC_ASSERT (ra);
476                                 ra->set_entry_point (ra->entry_point() + trim_start);
477                                 ra->set_duration (ra->duration() - trim_start - trim_end);
478                                 a.push_back (
479                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
480                                         );
481                         }
482
483                         if (j->reference_audio ()) {
484                                 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
485                                 DCPOMATIC_ASSERT (ra);
486                                 ra->set_entry_point (ra->entry_point() + trim_start);
487                                 ra->set_duration (ra->duration() - trim_start - trim_end);
488                                 a.push_back (
489                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
490                                         );
491                         }
492
493                         if (j->reference_subtitle ()) {
494                                 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
495                                 DCPOMATIC_ASSERT (ra);
496                                 ra->set_entry_point (ra->entry_point() + trim_start);
497                                 ra->set_duration (ra->duration() - trim_start - trim_end);
498                                 a.push_back (
499                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
500                                         );
501                         }
502
503                         /* Assume that main picture duration is the length of the reel */
504                         offset += k->main_picture()->duration ();
505                 }
506         }
507
508         return a;
509 }
510
/** Run one pass of the player: ask the earliest decoder for some more data,
 *  fill gaps between content with black/silence and emit any audio which is
 *  now complete.
 *  @return true if there is nothing more to do (no decoder produced anything
 *  and no filling happened).
 */
bool
Player::pass ()
{
	if (!_have_valid_pieces) {
		setup_pieces ();
	}

	/* Find the unfinished piece which is earliest in DCP time; that is the
	   one that should be asked for more data.
	*/
	shared_ptr<Piece> earliest;
	DCPTime earliest_content;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (!i->done) {
			DCPTime const t = i->content->position() + DCPTime (i->decoder->position(), i->frc);
			if (!earliest || t < earliest_content) {
				earliest_content = t;
				earliest = i;
			}
		}
	}

	if (earliest) {
		earliest->done = earliest->decoder->pass ();
		if (earliest->done && earliest->content->audio) {
			/* Flush the Player audio system for this piece */
			BOOST_FOREACH (AudioStreamPtr i, earliest->content->audio->streams()) {
				audio_flush (earliest, i);
			}
		}
	}

	/* Fill towards the next thing that might happen (or the end of the playlist).  This is to fill gaps between content,
	   NOT to fill gaps within content (the latter is done in ::video())
	*/
	DCPTime fill_towards = earliest ? earliest_content : _playlist->length();

	/* Work out where to fill video from */
	optional<DCPTime> video_fill_from;
	if (_last_video_time && !_playlist->video_content_at(*_last_video_time)) {
		/* No seek; fill from the last video time */
		video_fill_from = _last_video_time;
	} else if (_last_seek_time && !_playlist->video_content_at(*_last_seek_time)) {
		/* Seek into an empty area; fill from the seek time */
		video_fill_from = _last_seek_time;
	}

	bool filled = false;
	/* Fill some black if we would emit before the earliest piece of content.  This is so we act like a phantom
	   Piece which emits black in spaces (we only emit if we are the earliest thing)
	*/
	if (earliest && video_fill_from && *video_fill_from < earliest_content && ((fill_towards - *video_fill_from)) > one_video_frame()) {
		emit_video (black_player_video_frame(), *video_fill_from);
		filled = true;
	} else if (_playlist->length() == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(), DCPTime());
		filled = true;
	}

	/* Work out where to fill audio from, in the same way as for video */
	optional<DCPTime> audio_fill_from;
	if (_last_audio_time && !_playlist->audio_content_at(*_last_audio_time)) {
		/* No seek; fill from the last thing that happened */
		audio_fill_from = _last_audio_time;
	} else if (_last_seek_time && !_playlist->audio_content_at(*_last_seek_time)) {
		/* Seek into an empty area; fill from the seek time */
		audio_fill_from = _last_seek_time;
	}

	if (audio_fill_from && audio_fill_from < fill_towards) {
		/* Fill at most one video frame's worth of silence per pass */
		DCPTimePeriod period (*audio_fill_from, fill_towards);
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		filled = true;
	}

	/* Emit any audio that is ready */

	/* We can only pull audio up to the point before which every unfinished
	   stream has already pushed; anything later might still change.
	*/
	DCPTime pull_to = _playlist->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* There has been an accurate seek and we have received some audio before the seek time;
			   discard it.
			*/
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		}

		if (_last_audio_time) {
			/* Silence-fill any gap between the last audio we emitted and this block */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	return !earliest && !filled;
}
618
619 optional<PositionImage>
620 Player::subtitles_for_frame (DCPTime time) const
621 {
622         list<PositionImage> subtitles;
623
624         BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
625
626                 /* Image subtitles */
627                 list<PositionImage> c = transform_image_subtitles (i.image);
628                 copy (c.begin(), c.end(), back_inserter (subtitles));
629
630                 /* Text subtitles (rendered to an image) */
631                 if (!i.text.empty ()) {
632                         list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
633                         copy (s.begin(), s.end(), back_inserter (subtitles));
634                 }
635         }
636
637         if (subtitles.empty ()) {
638                 return optional<PositionImage> ();
639         }
640
641         return merge (subtitles);
642 }
643
644 void
645 Player::video (weak_ptr<Piece> wp, ContentVideo video)
646 {
647         shared_ptr<Piece> piece = wp.lock ();
648         if (!piece) {
649                 return;
650         }
651
652         FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
653         if (frc.skip && (video.frame % 2) == 1) {
654                 return;
655         }
656
657         /* Time and period of the frame we will emit */
658         DCPTime const time = content_video_to_dcp (piece, video.frame);
659         DCPTimePeriod const period (time, time + one_video_frame());
660
661         /* Discard if it's outside the content's period or if it's before the last accurate seek */
662         if (
663                 time < piece->content->position() ||
664                 time >= piece->content->end() ||
665                 (_last_seek_time && _last_seek_accurate && time < *_last_seek_time)) {
666                 return;
667         }
668
669         /* Fill gaps caused by (the hopefully rare event of) a decoder not emitting contiguous video.  We have to do this here
670            as in the problematic case we are about to emit a frame which is not contiguous with the previous.
671         */
672
673         if (_last_video_time) {
674                 /* XXX: this may not work for 3D */
675                 BOOST_FOREACH (DCPTimePeriod i, subtract(DCPTimePeriod (*_last_video_time, time), _no_video)) {
676                         for (DCPTime j = i.from; j < i.to; j += one_video_frame()) {
677                                 if (_last_video) {
678                                         emit_video (shared_ptr<PlayerVideo> (new PlayerVideo (*_last_video)), j);
679                                 } else {
680                                         emit_video (black_player_video_frame(), j);
681                                 }
682                         }
683                 }
684         }
685
686         _last_video.reset (
687                 new PlayerVideo (
688                         video.image,
689                         piece->content->video->crop (),
690                         piece->content->video->fade (video.frame),
691                         piece->content->video->scale().size (
692                                 piece->content->video, _video_container_size, _film->frame_size ()
693                                 ),
694                         _video_container_size,
695                         video.eyes,
696                         video.part,
697                         piece->content->video->colour_conversion ()
698                         )
699                 );
700
701         emit_video (_last_video, time);
702 }
703
/** Flush the resampler for a stream of a piece which has finished decoding,
 *  pushing any remaining audio through the usual processing chain.
 *  @param piece Piece whose decoder has finished.
 *  @param stream Stream to flush.
 */
void
Player::audio_flush (shared_ptr<Piece> piece, AudioStreamPtr stream)
{
	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* The `false' here presumably means "don't create a resampler if there
	   isn't one already" (cf. the `true' in Player::audio) -- confirm against
	   Player::resampler.
	*/
	shared_ptr<Resampler> r = resampler (content, stream, false);
	if (!r) {
		return;
	}

	pair<shared_ptr<const AudioBuffers>, Frame> ro = r->flush ();
	if (ro.first->frames() == 0) {
		/* Nothing left in the resampler */
		return;
	}

	ContentAudio content_audio;
	content_audio.audio = ro.first;
	content_audio.frame = ro.second;

	/* Compute time in the DCP, including the content's audio delay */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);

	audio_transform (content, stream, content_audio, time);
}
729
730 /** Do our common processing on some audio */
731 void
732 Player::audio_transform (shared_ptr<AudioContent> content, AudioStreamPtr stream, ContentAudio content_audio, DCPTime time)
733 {
734         DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
735
736         /* Gain */
737
738         if (content->gain() != 0) {
739                 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
740                 gain->apply_gain (content->gain ());
741                 content_audio.audio = gain;
742         }
743
744         /* Remap */
745
746         shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), content_audio.audio->frames()));
747         dcp_mapped->make_silent ();
748
749         AudioMapping map = stream->mapping ();
750         for (int i = 0; i < map.input_channels(); ++i) {
751                 for (int j = 0; j < dcp_mapped->channels(); ++j) {
752                         if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
753                                 dcp_mapped->accumulate_channel (
754                                         content_audio.audio.get(),
755                                         i,
756                                         static_cast<dcp::Channel> (j),
757                                         map.get (i, static_cast<dcp::Channel> (j))
758                                         );
759                         }
760                 }
761         }
762
763         content_audio.audio = dcp_mapped;
764
765         /* Process */
766
767         if (_audio_processor) {
768                 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
769         }
770
771         /* Push */
772
773         _audio_merger.push (content_audio.audio, time);
774         DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
775         _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
776 }
777
778 void
779 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
780 {
781         DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
782
783         shared_ptr<Piece> piece = wp.lock ();
784         if (!piece) {
785                 return;
786         }
787
788         shared_ptr<AudioContent> content = piece->content->audio;
789         DCPOMATIC_ASSERT (content);
790
791         /* Resample */
792         if (stream->frame_rate() != content->resampled_frame_rate()) {
793                 shared_ptr<Resampler> r = resampler (content, stream, true);
794                 pair<shared_ptr<const AudioBuffers>, Frame> ro = r->run (content_audio.audio, content_audio.frame);
795                 if (ro.first->frames() == 0) {
796                         return;
797                 }
798                 content_audio.audio = ro.first;
799                 content_audio.frame = ro.second;
800         }
801
802         /* Compute time in the DCP */
803         DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
804         /* And the end of this block in the DCP */
805         DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
806
807         /* Pad any gap which may be caused by audio delay */
808         if (_last_audio_time) {
809                 fill_audio (DCPTimePeriod (*_last_audio_time, time));
810         } else if (_last_seek_time && _last_seek_accurate) {
811                 fill_audio (DCPTimePeriod (*_last_seek_time, time));
812         }
813
814         /* Remove anything that comes before the start or after the end of the content */
815         if (time < piece->content->position()) {
816                 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
817                 if (!cut.first) {
818                         /* This audio is entirely discarded */
819                         return;
820                 }
821                 content_audio.audio = cut.first;
822                 time = cut.second;
823         } else if (time > piece->content->end()) {
824                 /* Discard it all */
825                 return;
826         } else if (end > piece->content->end()) {
827                 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
828                 if (remaining_frames == 0) {
829                         return;
830                 }
831                 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
832                 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
833                 content_audio.audio = cut;
834         }
835
836         audio_transform (content, stream, content_audio, time);
837 }
838
839 void
840 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
841 {
842         shared_ptr<Piece> piece = wp.lock ();
843         if (!piece) {
844                 return;
845         }
846
847         /* Apply content's subtitle offsets */
848         subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
849         subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
850
851         /* Apply content's subtitle scale */
852         subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
853         subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
854
855         /* Apply a corrective translation to keep the subtitle centred after that scale */
856         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
857         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
858
859         PlayerSubtitles ps;
860         ps.image.push_back (subtitle.sub);
861         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
862
863         _active_subtitles.add_from (wp, ps, from);
864 }
865
866 void
867 Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
868 {
869         shared_ptr<Piece> piece = wp.lock ();
870         if (!piece) {
871                 return;
872         }
873
874         PlayerSubtitles ps;
875         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
876
877         BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
878                 s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
879                 s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
880                 float const xs = piece->content->subtitle->x_scale();
881                 float const ys = piece->content->subtitle->y_scale();
882                 float size = s.size();
883
884                 /* Adjust size to express the common part of the scaling;
885                    e.g. if xs = ys = 0.5 we scale size by 2.
886                 */
887                 if (xs > 1e-5 && ys > 1e-5) {
888                         size *= 1 / min (1 / xs, 1 / ys);
889                 }
890                 s.set_size (size);
891
892                 /* Then express aspect ratio changes */
893                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
894                         s.set_aspect_adjust (xs / ys);
895                 }
896
897                 s.set_in (dcp::Time(from.seconds(), 1000));
898                 ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
899                 ps.add_fonts (piece->content->subtitle->fonts ());
900         }
901
902         _active_subtitles.add_from (wp, ps, from);
903 }
904
905 void
906 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
907 {
908         if (!_active_subtitles.have (wp)) {
909                 return;
910         }
911
912         shared_ptr<Piece> piece = wp.lock ();
913         if (!piece) {
914                 return;
915         }
916
917         DCPTime const dcp_to = content_time_to_dcp (piece, to);
918
919         pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
920
921         if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
922                 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
923         }
924 }
925
926 void
927 Player::seek (DCPTime time, bool accurate)
928 {
929         if (_audio_processor) {
930                 _audio_processor->flush ();
931         }
932
933         for (ResamplerMap::iterator i = _resamplers.begin(); i != _resamplers.end(); ++i) {
934                 i->second->flush ();
935                 i->second->reset ();
936         }
937
938         _audio_merger.clear ();
939         _active_subtitles.clear ();
940
941         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
942                 if (time < i->content->position()) {
943                         /* Before; seek to 0 */
944                         i->decoder->seek (ContentTime(), accurate);
945                         i->done = false;
946                 } else if (i->content->position() <= time && time < i->content->end()) {
947                         /* During; seek to position */
948                         i->decoder->seek (dcp_to_content_time (i, time), accurate);
949                         i->done = false;
950                 } else {
951                         /* After; this piece is done */
952                         i->done = true;
953                 }
954         }
955
956         _last_video_time = optional<DCPTime> ();
957         _last_audio_time = optional<DCPTime> ();
958         _last_seek_time = time;
959         _last_seek_accurate = accurate;
960 }
961
962 shared_ptr<Resampler>
963 Player::resampler (shared_ptr<const AudioContent> content, AudioStreamPtr stream, bool create)
964 {
965         ResamplerMap::const_iterator i = _resamplers.find (make_pair (content, stream));
966         if (i != _resamplers.end ()) {
967                 return i->second;
968         }
969
970         if (!create) {
971                 return shared_ptr<Resampler> ();
972         }
973
974         LOG_GENERAL (
975                 "Creating new resampler from %1 to %2 with %3 channels",
976                 stream->frame_rate(),
977                 content->resampled_frame_rate(),
978                 stream->channels()
979                 );
980
981         shared_ptr<Resampler> r (
982                 new Resampler (stream->frame_rate(), content->resampled_frame_rate(), stream->channels())
983                 );
984
985         _resamplers[make_pair(content, stream)] = r;
986         return r;
987 }
988
989 void
990 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
991 {
992         optional<PositionImage> subtitles = subtitles_for_frame (time);
993         if (subtitles) {
994                 pv->set_subtitle (subtitles.get ());
995         }
996         Video (pv, time);
997         _last_video_time = time + one_video_frame();
998         _active_subtitles.clear_before (time);
999 }
1000
1001 void
1002 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1003 {
1004         Audio (data, time);
1005         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate ());
1006 }
1007
1008 void
1009 Player::fill_audio (DCPTimePeriod period)
1010 {
1011         BOOST_FOREACH (DCPTimePeriod i, subtract(period, _no_audio)) {
1012                 DCPTime t = i.from;
1013                 while (t < i.to) {
1014                         DCPTime block = min (DCPTime::from_seconds (0.5), i.to - t);
1015                         Frame const samples = block.frames_round(_film->audio_frame_rate());
1016                         if (samples) {
1017                                 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1018                                 silence->make_silent ();
1019                                 emit_audio (silence, t);
1020                         }
1021                         t += block;
1022                 }
1023         }
1024 }
1025
1026 DCPTime
1027 Player::one_video_frame () const
1028 {
1029         return DCPTime::from_frames (1, _film->video_frame_rate ());
1030 }
1031
1032 pair<shared_ptr<AudioBuffers>, DCPTime>
1033 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1034 {
1035         DCPTime const discard_time = discard_to - time;
1036         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1037         Frame remaining_frames = audio->frames() - discard_frames;
1038         if (remaining_frames <= 0) {
1039                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1040         }
1041         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1042         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1043         return make_pair(cut, time + discard_time);
1044 }