Fix failure to fill FFmpeg gaps at the start of films.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_subtitles.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "resampler.h"
51 #include "compose.hpp"
52 #include <dcp/reel.h>
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <boost/foreach.hpp>
57 #include <stdint.h>
58 #include <algorithm>
59 #include <iostream>
60
61 #include "i18n.h"
62
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
64
65 using std::list;
66 using std::cout;
67 using std::min;
68 using std::max;
69 using std::min;
70 using std::vector;
71 using std::pair;
72 using std::map;
73 using std::make_pair;
74 using std::copy;
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
80
/** Construct a Player.
 *  @param film Film that is to be played.
 *  @param playlist Playlist supplying the content.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _have_valid_pieces (false)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _always_burn_subtitles (false)
	, _fast (false)
	, _play_referenced (false)
	, _last_seek_accurate (true)
	, _audio_merger (_film->audio_frame_rate())
{
	/* Watch for changes to the film, the playlist and its content so that we can
	   invalidate our state and/or tell our own listeners.
	*/
	_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
	_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
	_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
	set_video_container_size (_film->frame_size ());

	/* Pick up the film's audio processor (if any) via the normal change handler */
	film_changed (Film::AUDIO_PROCESSOR);

	/* Accurate seek to the start; presumably this primes the seek state that
	   pass() and video() use to fill any gap at the start of the film —
	   NOTE(review): seek() is not visible here, confirm.
	*/
	seek (DCPTime (), true);
}
102
void
Player::setup_pieces ()
{
	/* Rebuild our list of Pieces (content + decoder + frame-rate change) from the
	   playlist and connect the decoders' signals to our handlers.
	*/
	_pieces.clear ();

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

		if (!i->paths_valid ()) {
			/* Content whose files are missing can't be decoded */
			continue;
		}

		shared_ptr<Decoder> decoder = decoder_factory (i, _film->log());
		FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());

		if (!decoder) {
			/* Not something that we can decode; e.g. Atmos content */
			continue;
		}

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore ();
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore ();
		}

		/* If we are to play content that is referenced from a DCP we must decode it too */
		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp && _play_referenced) {
			dcp->set_decode_referenced ();
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		/* Connect with weak_ptr<Piece> so the connections do not keep the Piece alive */
		if (decoder->video) {
			decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		if (decoder->subtitle) {
			decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
			decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
			decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
		}
	}

	/* Set up a StreamState for every audio stream, starting at its content's position */
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Note the periods that referenced DCP content will provide, so that we do not
	   fill them with black/silence ourselves.
	*/
	if (!_play_referenced) {
		BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
			shared_ptr<DCPContent> dc = dynamic_pointer_cast<DCPContent> (i->content);
			if (dc) {
				if (dc->reference_video()) {
					_no_video.push_back (DCPTimePeriod (dc->position(), dc->end()));
				}
				if (dc->reference_audio()) {
					_no_audio.push_back (DCPTimePeriod (dc->position(), dc->end()));
				}
			}
		}
	}

	/* We have not emitted any video or audio yet */
	_last_video_time = optional<DCPTime> ();
	_last_audio_time = optional<DCPTime> ();
	_have_valid_pieces = true;
}
179
180 void
181 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
182 {
183         shared_ptr<Content> c = w.lock ();
184         if (!c) {
185                 return;
186         }
187
188         if (
189                 property == ContentProperty::POSITION ||
190                 property == ContentProperty::LENGTH ||
191                 property == ContentProperty::TRIM_START ||
192                 property == ContentProperty::TRIM_END ||
193                 property == ContentProperty::PATH ||
194                 property == VideoContentProperty::FRAME_TYPE ||
195                 property == DCPContentProperty::NEEDS_ASSETS ||
196                 property == DCPContentProperty::NEEDS_KDM ||
197                 property == SubtitleContentProperty::COLOUR ||
198                 property == SubtitleContentProperty::OUTLINE ||
199                 property == SubtitleContentProperty::SHADOW ||
200                 property == SubtitleContentProperty::EFFECT_COLOUR ||
201                 property == FFmpegContentProperty::SUBTITLE_STREAM ||
202                 property == VideoContentProperty::COLOUR_CONVERSION
203                 ) {
204
205                 _have_valid_pieces = false;
206                 Changed (frequent);
207
208         } else if (
209                 property == SubtitleContentProperty::LINE_SPACING ||
210                 property == SubtitleContentProperty::OUTLINE_WIDTH ||
211                 property == SubtitleContentProperty::Y_SCALE ||
212                 property == SubtitleContentProperty::FADE_IN ||
213                 property == SubtitleContentProperty::FADE_OUT ||
214                 property == ContentProperty::VIDEO_FRAME_RATE ||
215                 property == SubtitleContentProperty::USE ||
216                 property == SubtitleContentProperty::X_OFFSET ||
217                 property == SubtitleContentProperty::Y_OFFSET ||
218                 property == SubtitleContentProperty::X_SCALE ||
219                 property == SubtitleContentProperty::FONTS ||
220                 property == VideoContentProperty::CROP ||
221                 property == VideoContentProperty::SCALE ||
222                 property == VideoContentProperty::FADE_IN ||
223                 property == VideoContentProperty::FADE_OUT
224                 ) {
225
226                 Changed (frequent);
227         }
228 }
229
230 void
231 Player::set_video_container_size (dcp::Size s)
232 {
233         if (s == _video_container_size) {
234                 return;
235         }
236
237         _video_container_size = s;
238
239         _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
240         _black_image->make_black ();
241
242         Changed (false);
243 }
244
void
Player::playlist_changed ()
{
	/* The playlist has changed wholesale, so our Pieces are out of date */
	_have_valid_pieces = false;
	Changed (false);
}
251
252 void
253 Player::film_changed (Film::Property p)
254 {
255         /* Here we should notice Film properties that affect our output, and
256            alert listeners that our output now would be different to how it was
257            last time we were run.
258         */
259
260         if (p == Film::CONTAINER) {
261                 Changed (false);
262         } else if (p == Film::VIDEO_FRAME_RATE) {
263                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
264                    so we need new pieces here.
265                 */
266                 _have_valid_pieces = false;
267                 Changed (false);
268         } else if (p == Film::AUDIO_PROCESSOR) {
269                 if (_film->audio_processor ()) {
270                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
271                 }
272         }
273 }
274
275 list<PositionImage>
276 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
277 {
278         list<PositionImage> all;
279
280         for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
281                 if (!i->image) {
282                         continue;
283                 }
284
285                 /* We will scale the subtitle up to fit _video_container_size */
286                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
287
288                 /* Then we need a corrective translation, consisting of two parts:
289                  *
290                  * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
291                  *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
292                  *
293                  * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
294                  *     (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
295                  *     (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
296                  *
297                  * Combining these two translations gives these expressions.
298                  */
299
300                 all.push_back (
301                         PositionImage (
302                                 i->image->scale (
303                                         scaled_size,
304                                         dcp::YUV_TO_RGB_REC601,
305                                         i->image->pixel_format (),
306                                         true,
307                                         _fast
308                                         ),
309                                 Position<int> (
310                                         lrint (_video_container_size.width * i->rectangle.x),
311                                         lrint (_video_container_size.height * i->rectangle.y)
312                                         )
313                                 )
314                         );
315         }
316
317         return all;
318 }
319
320 shared_ptr<PlayerVideo>
321 Player::black_player_video_frame () const
322 {
323         return shared_ptr<PlayerVideo> (
324                 new PlayerVideo (
325                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
326                         Crop (),
327                         optional<double> (),
328                         _video_container_size,
329                         _video_container_size,
330                         EYES_BOTH,
331                         PART_WHOLE,
332                         PresetColourConversion::all().front().conversion
333                 )
334         );
335 }
336
/** @param piece A Piece from our playlist.
 *  @param t Time in the DCP.
 *  @return Corresponding frame index in the piece's video content.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	/* Offset from the start of the content, clamped to its trimmed extent */
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
353
354 DCPTime
355 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
356 {
357         /* See comment in dcp_to_content_video */
358         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
359         return max (DCPTime (), d + piece->content->position ());
360 }
361
/** @param piece A Piece from our playlist.
 *  @param t Time in the DCP.
 *  @return Corresponding frame index in the piece's audio, resampled to the film's audio rate.
 */
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
	/* Offset from the start of the content, clamped to its trimmed length */
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
}
370
371 DCPTime
372 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
373 {
374         /* See comment in dcp_to_content_video */
375         DCPTime const d = DCPTime::from_frames (f, _film->audio_frame_rate()) - DCPTime (piece->content->trim_start(), piece->frc);
376         return max (DCPTime (), d + piece->content->position ());
377 }
378
/** @param piece A Piece from our playlist.
 *  @param t Time in the DCP.
 *  @return Corresponding time within the piece's content (including its trim).
 */
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
	/* Offset from the start of the content, clamped to its trimmed length */
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
386
387 DCPTime
388 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
389 {
390         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
391 }
392
393 list<shared_ptr<Font> >
394 Player::get_subtitle_fonts ()
395 {
396         if (!_have_valid_pieces) {
397                 setup_pieces ();
398         }
399
400         list<shared_ptr<Font> > fonts;
401         BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
402                 if (p->content->subtitle) {
403                         /* XXX: things may go wrong if there are duplicate font IDs
404                            with different font files.
405                         */
406                         list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
407                         copy (f.begin(), f.end(), back_inserter (fonts));
408                 }
409         }
410
411         return fonts;
412 }
413
414 /** Set this player never to produce any video data */
415 void
416 Player::set_ignore_video ()
417 {
418         _ignore_video = true;
419 }
420
/** Set whether or not this player should always burn text subtitles into the image,
 *  regardless of the content settings.
 *  @param burn true to always burn subtitles, false to obey content settings.
 */
void
Player::set_always_burn_subtitles (bool burn)
{
	/* Used by subtitles_for_frame() when deciding what to burn in */
	_always_burn_subtitles = burn;
}
430
/** Set this player to run as fast as possible; presumably at some cost in quality
 *  (the flag is passed to image scaling in transform_image_subtitles()).
 *  Pieces are invalidated so that the flag takes effect.
 */
void
Player::set_fast ()
{
	_fast = true;
	_have_valid_pieces = false;
}
437
/** Set this player to play content that is referenced from DCPs, rather than
 *  skipping it; see the use of DCPDecoder::set_decode_referenced() in setup_pieces().
 *  Pieces are invalidated so that the flag takes effect.
 */
void
Player::set_play_referenced ()
{
	_play_referenced = true;
	_have_valid_pieces = false;
}
444
/** @return Reel assets (picture/sound/subtitle) from any DCP content in the playlist
 *  which is marked to be referenced rather than re-encoded, with the DCP time period
 *  that each asset occupies.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (j, _film->log()));
		} catch (...) {
			/* NOTE(review): any failure to open the DCP silently returns whatever
			   has been collected so far rather than propagating an error — confirm
			   this is intended.
			*/
			return a;
		}

		/* Offset, in frames at the film's rate, of the current reel from the start
		   of this content.
		*/
		int64_t offset = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			DCPOMATIC_ASSERT (j->video_frame_rate ());
			/* cfr: frame rate of the DCP content; ffr: frame rate of the film */
			double const cfr = j->video_frame_rate().get();
			Frame const trim_start = j->trim_start().frames_round (cfr);
			Frame const trim_end = j->trim_end().frames_round (cfr);
			int const ffr = _film->video_frame_rate ();

			DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
			if (j->reference_video ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
				DCPOMATIC_ASSERT (ra);
				/* Apply the content's trims by adjusting the asset's entry point and
				   duration.  Note that this mutates the asset held by the decoder's reel.
				   NOTE(review): the same trims are applied to every reel — confirm that
				   is correct for multi-reel DCPs.
				*/
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_audio ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_subtitle ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			/* Assume that main picture duration is the length of the reel */
			/* NOTE(review): this advances by the full (untrimmed) picture duration even
			   though the trims were applied to the assets above — confirm `from` is still
			   correct for reels after the first.
			*/
			offset += k->main_picture()->duration ();
		}
	}

	return a;
}
510
/** Run one pass of the player: ask the earliest decoder for more data, filling any
 *  gaps between content with black/silence as required.
 *  @return true when there is nothing left to do.
 */
bool
Player::pass ()
{
	if (!_have_valid_pieces) {
		setup_pieces ();
	}

	/* Find the unfinished piece whose decoder is earliest in DCP time; that is the
	   one we should pass() next.
	*/
	shared_ptr<Piece> earliest;
	DCPTime earliest_content;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (!i->done) {
			DCPTime const t = i->content->position() + DCPTime (i->decoder->position(), i->frc);
			if (!earliest || t < earliest_content) {
				earliest_content = t;
				earliest = i;
			}
		}
	}

	if (earliest) {
		earliest->done = earliest->decoder->pass ();
		if (earliest->done && earliest->content->audio) {
			/* Flush the Player audio system for this piece */
			BOOST_FOREACH (AudioStreamPtr i, earliest->content->audio->streams()) {
				audio_flush (earliest, i);
			}
		}
	}

	/* Fill towards the next thing that might happen (or the end of the playlist).  This is to fill gaps between content,
	   NOT to fill gaps within content (the latter is done in ::video())
	*/
	DCPTime fill_towards = earliest ? earliest_content : _playlist->length();

	/* Work out where to fill video from */
	optional<DCPTime> video_fill_from;
	if (_last_video_time && !_playlist->video_content_at(*_last_video_time)) {
		/* No seek; fill from the last video time */
		video_fill_from = _last_video_time;
	} else if (_last_seek_time && !_playlist->video_content_at(*_last_seek_time)) {
		/* Seek into an empty area; fill from the seek time */
		video_fill_from = _last_seek_time;
	}

	bool filled = false;
	/* Fill some black if we would emit before the earliest piece of content.  This is so we act like a phantom
	   Piece which emits black in spaces (we only emit if we are the earliest thing)
	*/
	if (earliest && video_fill_from && *video_fill_from < earliest_content && ((fill_towards - *video_fill_from)) >= one_video_frame()) {
		emit_video (black_player_video_frame(), *video_fill_from);
		filled = true;
	} else if (_playlist->length() == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(), DCPTime());
		filled = true;
	}

	/* Work out where to fill audio from, in the same way as for video */
	optional<DCPTime> audio_fill_from;
	if (_last_audio_time && !_playlist->audio_content_at(*_last_audio_time)) {
		/* No seek; fill from the last thing that happened */
		audio_fill_from = _last_audio_time;
	} else if (_last_seek_time && !_playlist->audio_content_at(*_last_seek_time)) {
		/* Seek into an empty area; fill from the seek time */
		audio_fill_from = _last_seek_time;
	}

	if (audio_fill_from && audio_fill_from < fill_towards) {
		DCPTimePeriod period (*audio_fill_from, fill_towards);
		/* Fill at most one video frame's worth of silence per pass */
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		filled = true;
	}

	/* Emit any audio that is ready */

	/* We can only pull the merger up to the earliest point before which every
	   still-active stream has pushed, otherwise later-arriving audio could not
	   be mixed in.
	*/
	DCPTime pull_to = _playlist->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* There has been an accurate seek and we have received some audio before the seek time;
			   discard it.
			*/
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		}

		if (_last_audio_time) {
			/* Fill any gap between what we last emitted and this block */
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	return !earliest && !filled;
}
618
619 optional<PositionImage>
620 Player::subtitles_for_frame (DCPTime time) const
621 {
622         list<PositionImage> subtitles;
623
624         BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
625
626                 /* Image subtitles */
627                 list<PositionImage> c = transform_image_subtitles (i.image);
628                 copy (c.begin(), c.end(), back_inserter (subtitles));
629
630                 /* Text subtitles (rendered to an image) */
631                 if (!i.text.empty ()) {
632                         list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
633                         copy (s.begin(), s.end(), back_inserter (subtitles));
634                 }
635         }
636
637         if (subtitles.empty ()) {
638                 return optional<PositionImage> ();
639         }
640
641         return merge (subtitles);
642 }
643
644 void
645 Player::video (weak_ptr<Piece> wp, ContentVideo video)
646 {
647         shared_ptr<Piece> piece = wp.lock ();
648         if (!piece) {
649                 return;
650         }
651
652         FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
653         if (frc.skip && (video.frame % 2) == 1) {
654                 return;
655         }
656
657         /* Time and period of the frame we will emit */
658         DCPTime const time = content_video_to_dcp (piece, video.frame);
659         DCPTimePeriod const period (time, time + one_video_frame());
660
661         /* Discard if it's outside the content's period or if it's before the last accurate seek */
662         if (
663                 time < piece->content->position() ||
664                 time >= piece->content->end() ||
665                 (_last_seek_time && _last_seek_accurate && time < *_last_seek_time)) {
666                 return;
667         }
668
669         /* Fill gaps caused by (the hopefully rare event of) a decoder not emitting contiguous video.  We have to do this here
670            as in the problematic case we are about to emit a frame which is not contiguous with the previous.
671         */
672
673         optional<DCPTime> fill_to;
674         if (_last_video_time) {
675                 fill_to = _last_video_time;
676         } else if (_last_seek_time && _last_seek_accurate) {
677                 fill_to = _last_seek_time;
678         }
679
680         if (fill_to) {
681                 /* XXX: this may not work for 3D */
682                 BOOST_FOREACH (DCPTimePeriod i, subtract(DCPTimePeriod (*fill_to, time), _no_video)) {
683                         for (DCPTime j = i.from; j < i.to; j += one_video_frame()) {
684                                 if (_last_video) {
685                                         emit_video (shared_ptr<PlayerVideo> (new PlayerVideo (*_last_video)), j);
686                                 } else {
687                                         emit_video (black_player_video_frame(), j);
688                                 }
689                         }
690                 }
691         }
692
693         _last_video.reset (
694                 new PlayerVideo (
695                         video.image,
696                         piece->content->video->crop (),
697                         piece->content->video->fade (video.frame),
698                         piece->content->video->scale().size (
699                                 piece->content->video, _video_container_size, _film->frame_size ()
700                                 ),
701                         _video_container_size,
702                         video.eyes,
703                         video.part,
704                         piece->content->video->colour_conversion ()
705                         )
706                 );
707
708         emit_video (_last_video, time);
709 }
710
/** Flush any audio remaining in the resampler for a piece/stream and pass it
 *  on for our common audio processing.
 */
void
Player::audio_flush (shared_ptr<Piece> piece, AudioStreamPtr stream)
{
	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* presumably `false` means don't create a resampler if there isn't one
	   (there would then be nothing to flush) — TODO confirm against resampler()
	*/
	shared_ptr<Resampler> r = resampler (content, stream, false);
	if (!r) {
		return;
	}

	pair<shared_ptr<const AudioBuffers>, Frame> ro = r->flush ();
	if (ro.first->frames() == 0) {
		/* Nothing came out of the flush */
		return;
	}

	ContentAudio content_audio;
	content_audio.audio = ro.first;
	content_audio.frame = ro.second;

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);

	audio_transform (content, stream, content_audio, time);
}
736
/** Do our common processing on some audio: apply gain, remap into the film's
 *  channel layout, run any audio processor and push to the merger.
 *  @param content Audio content the data came from.
 *  @param stream Stream the data came from.
 *  @param content_audio The audio data.
 *  @param time DCP time at which the audio starts.
 */
void
Player::audio_transform (shared_ptr<AudioContent> content, AudioStreamPtr stream, ContentAudio content_audio, DCPTime time)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Apply gain to a copy so that we don't modify the buffers we were given */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap: mix the stream's channels into the film's channel layout according
	   to the stream's AudioMapping.
	*/

	shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), content_audio.audio->frames()));
	dcp_mapped->make_silent ();

	AudioMapping map = stream->mapping ();
	for (int i = 0; i < map.input_channels(); ++i) {
		for (int j = 0; j < dcp_mapped->channels(); ++j) {
			if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
				dcp_mapped->accumulate_channel (
					content_audio.audio.get(),
					i,
					static_cast<dcp::Channel> (j),
					map.get (i, static_cast<dcp::Channel> (j))
					);
			}
		}
	}

	content_audio.audio = dcp_mapped;

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	/* Record the end of what this stream has pushed so that ::pass() knows how far
	   the merger can safely be pulled.
	*/
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
784
/** Handler for audio arriving from a decoder.  Resamples if necessary,
 *  fills any gap since the last audio (or since the last seek point),
 *  trims the audio to the content's period in the DCP and then passes
 *  it on for gain/remap/processing.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away (e.g. the playlist changed); drop this audio */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Resample if the stream's rate is not what the film wants */
	if (stream->frame_rate() != content->resampled_frame_rate()) {
		shared_ptr<Resampler> r = resampler (content, stream, true);
		pair<shared_ptr<const AudioBuffers>, Frame> ro = r->run (content_audio.audio, content_audio.frame);
		if (ro.first->frames() == 0) {
			/* The resampler is still buffering; nothing to emit yet */
			return;
		}
		content_audio.audio = ro.first;
		content_audio.frame = ro.second;
	}

	/* Compute time in the DCP, allowing for the content's audio delay */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());

	/* Fill any gap between the last audio we emitted and this block.  If nothing
	   has been emitted since the last accurate seek, fill up from the seek point
	   instead; this covers content (e.g. some FFmpeg files) whose audio does not
	   start right at the beginning.
	*/
	if (_last_audio_time) {
		fill_audio (DCPTimePeriod (*_last_audio_time, time));
	} else if (_last_seek_time && _last_seek_accurate) {
		fill_audio (DCPTimePeriod (*_last_seek_time, time));
	}

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		/* Keep the part after the content's start, and move `time' forward accordingly */
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end()) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end()) {
		/* Straddles the content's end; keep only the frames before it */
		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	audio_transform (content, stream, content_audio, time);
}
845
846 void
847 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
848 {
849         shared_ptr<Piece> piece = wp.lock ();
850         if (!piece) {
851                 return;
852         }
853
854         /* Apply content's subtitle offsets */
855         subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
856         subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
857
858         /* Apply content's subtitle scale */
859         subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
860         subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
861
862         /* Apply a corrective translation to keep the subtitle centred after that scale */
863         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
864         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
865
866         PlayerSubtitles ps;
867         ps.image.push_back (subtitle.sub);
868         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
869
870         _active_subtitles.add_from (wp, ps, from);
871 }
872
873 void
874 Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
875 {
876         shared_ptr<Piece> piece = wp.lock ();
877         if (!piece) {
878                 return;
879         }
880
881         PlayerSubtitles ps;
882         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
883
884         BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
885                 s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
886                 s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
887                 float const xs = piece->content->subtitle->x_scale();
888                 float const ys = piece->content->subtitle->y_scale();
889                 float size = s.size();
890
891                 /* Adjust size to express the common part of the scaling;
892                    e.g. if xs = ys = 0.5 we scale size by 2.
893                 */
894                 if (xs > 1e-5 && ys > 1e-5) {
895                         size *= 1 / min (1 / xs, 1 / ys);
896                 }
897                 s.set_size (size);
898
899                 /* Then express aspect ratio changes */
900                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
901                         s.set_aspect_adjust (xs / ys);
902                 }
903
904                 s.set_in (dcp::Time(from.seconds(), 1000));
905                 ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
906                 ps.add_fonts (piece->content->subtitle->fonts ());
907         }
908
909         _active_subtitles.add_from (wp, ps, from);
910 }
911
912 void
913 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
914 {
915         if (!_active_subtitles.have (wp)) {
916                 return;
917         }
918
919         shared_ptr<Piece> piece = wp.lock ();
920         if (!piece) {
921                 return;
922         }
923
924         DCPTime const dcp_to = content_time_to_dcp (piece, to);
925
926         pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
927
928         if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
929                 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
930         }
931 }
932
933 void
934 Player::seek (DCPTime time, bool accurate)
935 {
936         if (_audio_processor) {
937                 _audio_processor->flush ();
938         }
939
940         for (ResamplerMap::iterator i = _resamplers.begin(); i != _resamplers.end(); ++i) {
941                 i->second->flush ();
942                 i->second->reset ();
943         }
944
945         _audio_merger.clear ();
946         _active_subtitles.clear ();
947
948         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
949                 if (time < i->content->position()) {
950                         /* Before; seek to 0 */
951                         i->decoder->seek (ContentTime(), accurate);
952                         i->done = false;
953                 } else if (i->content->position() <= time && time < i->content->end()) {
954                         /* During; seek to position */
955                         i->decoder->seek (dcp_to_content_time (i, time), accurate);
956                         i->done = false;
957                 } else {
958                         /* After; this piece is done */
959                         i->done = true;
960                 }
961         }
962
963         _last_video_time = optional<DCPTime> ();
964         _last_audio_time = optional<DCPTime> ();
965         _last_seek_time = time;
966         _last_seek_accurate = accurate;
967 }
968
969 shared_ptr<Resampler>
970 Player::resampler (shared_ptr<const AudioContent> content, AudioStreamPtr stream, bool create)
971 {
972         ResamplerMap::const_iterator i = _resamplers.find (make_pair (content, stream));
973         if (i != _resamplers.end ()) {
974                 return i->second;
975         }
976
977         if (!create) {
978                 return shared_ptr<Resampler> ();
979         }
980
981         LOG_GENERAL (
982                 "Creating new resampler from %1 to %2 with %3 channels",
983                 stream->frame_rate(),
984                 content->resampled_frame_rate(),
985                 stream->channels()
986                 );
987
988         shared_ptr<Resampler> r (
989                 new Resampler (stream->frame_rate(), content->resampled_frame_rate(), stream->channels())
990                 );
991
992         _resamplers[make_pair(content, stream)] = r;
993         return r;
994 }
995
996 void
997 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
998 {
999         optional<PositionImage> subtitles = subtitles_for_frame (time);
1000         if (subtitles) {
1001                 pv->set_subtitle (subtitles.get ());
1002         }
1003         Video (pv, time);
1004         _last_video_time = time + one_video_frame();
1005         _active_subtitles.clear_before (time);
1006 }
1007
1008 void
1009 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1010 {
1011         Audio (data, time);
1012         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate ());
1013 }
1014
1015 void
1016 Player::fill_audio (DCPTimePeriod period)
1017 {
1018         BOOST_FOREACH (DCPTimePeriod i, subtract(period, _no_audio)) {
1019                 DCPTime t = i.from;
1020                 while (t < i.to) {
1021                         DCPTime block = min (DCPTime::from_seconds (0.5), i.to - t);
1022                         Frame const samples = block.frames_round(_film->audio_frame_rate());
1023                         if (samples) {
1024                                 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1025                                 silence->make_silent ();
1026                                 emit_audio (silence, t);
1027                         }
1028                         t += block;
1029                 }
1030         }
1031 }
1032
1033 DCPTime
1034 Player::one_video_frame () const
1035 {
1036         return DCPTime::from_frames (1, _film->video_frame_rate ());
1037 }
1038
1039 pair<shared_ptr<AudioBuffers>, DCPTime>
1040 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1041 {
1042         DCPTime const discard_time = discard_to - time;
1043         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1044         Frame remaining_frames = audio->frames() - discard_frames;
1045         if (remaining_frames <= 0) {
1046                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1047         }
1048         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1049         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1050         return make_pair(cut, time + discard_time);
1051 }