Correctly stop when we have no decoders and we haven't filled.
[dcpomatic.git] src/lib/player.cc
1 /*
2     Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_subtitles.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "resampler.h"
51 #include "compose.hpp"
52 #include <dcp/reel.h>
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <boost/foreach.hpp>
57 #include <stdint.h>
58 #include <algorithm>
59 #include <iostream>
60
61 #include "i18n.h"
62
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
64
65 using std::list;
66 using std::cout;
67 using std::min;
68 using std::max;
69
70 using std::vector;
71 using std::pair;
72 using std::map;
73 using std::make_pair;
74 using std::copy;
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
80
81 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
82         : _film (film)
83         , _playlist (playlist)
84         , _have_valid_pieces (false)
85         , _ignore_video (false)
86         , _ignore_audio (false)
87         , _always_burn_subtitles (false)
88         , _fast (false)
89         , _play_referenced (false)
90         , _last_seek_accurate (true)
91         , _audio_merger (_film->audio_frame_rate())
92 {
93         _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
94         _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
95         _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
96         set_video_container_size (_film->frame_size ());
97
98         film_changed (Film::AUDIO_PROCESSOR);
99
100         seek (DCPTime (), true);
101 }
102
103 void
104 Player::setup_pieces ()
105 {
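        /* Editor's note (descriptive comment added for clarity; not in the original source):
           build one Piece per piece of playable content.  Content whose files are missing, or
           that we have no decoder for (e.g. Atmos), is skipped.  Each Piece pairs the content
           with a fresh decoder and a FrameRateChange; the decoder's signals are wired to the
           Player's video / audio / subtitle handlers; per-stream audio state is recorded; and,
           unless referenced content is being played back directly, any DCP content whose video
           or audio is referenced contributes a period to _no_video / _no_audio so the fill
           logic leaves those spans alone.
        */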
106         _pieces.clear ();
107
108         BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
109
110                 if (!i->paths_valid ()) {
111                         continue;
112                 }
113
114                 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log());
115                 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
116
117                 if (!decoder) {
118                         /* Not something that we can decode; e.g. Atmos content */
119                         continue;
120                 }
121
122                 if (decoder->video && _ignore_video) {
123                         decoder->video->set_ignore ();
124                 }
125
126                 if (decoder->audio && _ignore_audio) {
127                         decoder->audio->set_ignore ();
128                 }
129
130                 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
131                 if (dcp && _play_referenced) {
132                         dcp->set_decode_referenced ();
133                 }
134
135                 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
136                 _pieces.push_back (piece);
137
138                 if (decoder->video) {
139                         decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
140                 }
141
142                 if (decoder->audio) {
143                         decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
144                 }
145
146                 if (decoder->subtitle) {
147                         decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
148                         decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
149                         decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
150                 }
151         }
152
153         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
154                 if (i->content->audio) {
155                         BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
156                                 _stream_states[j] = StreamState (i, i->content->position ());
157                         }
158                 }
159         }
160
161         if (!_play_referenced) {
162                 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
163                         shared_ptr<DCPContent> dc = dynamic_pointer_cast<DCPContent> (i->content);
164                         if (dc) {
165                                 if (dc->reference_video()) {
166                                         _no_video.push_back (DCPTimePeriod (dc->position(), dc->end()));
167                                 }
168                                 if (dc->reference_audio()) {
169                                         _no_audio.push_back (DCPTimePeriod (dc->position(), dc->end()));
170                                 }
171                         }
172                 }
173         }
174
175         _last_video_time = optional<DCPTime> ();
176         _last_audio_time = optional<DCPTime> ();
177         _have_valid_pieces = true;
178 }
179
180 void
181 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
182 {
183         shared_ptr<Content> c = w.lock ();
184         if (!c) {
185                 return;
186         }
187
188         if (
189                 property == ContentProperty::POSITION ||
190                 property == ContentProperty::LENGTH ||
191                 property == ContentProperty::TRIM_START ||
192                 property == ContentProperty::TRIM_END ||
193                 property == ContentProperty::PATH ||
194                 property == VideoContentProperty::FRAME_TYPE ||
195                 property == DCPContentProperty::NEEDS_ASSETS ||
196                 property == DCPContentProperty::NEEDS_KDM ||
197                 property == SubtitleContentProperty::COLOUR ||
198                 property == SubtitleContentProperty::OUTLINE ||
199                 property == SubtitleContentProperty::SHADOW ||
200                 property == SubtitleContentProperty::EFFECT_COLOUR ||
201                 property == FFmpegContentProperty::SUBTITLE_STREAM ||
202                 property == VideoContentProperty::COLOUR_CONVERSION
203                 ) {
204
205                 _have_valid_pieces = false;
206                 Changed (frequent);
207
208         } else if (
209                 property == SubtitleContentProperty::LINE_SPACING ||
210                 property == SubtitleContentProperty::OUTLINE_WIDTH ||
211                 property == SubtitleContentProperty::Y_SCALE ||
212                 property == SubtitleContentProperty::FADE_IN ||
213                 property == SubtitleContentProperty::FADE_OUT ||
214                 property == ContentProperty::VIDEO_FRAME_RATE ||
215                 property == SubtitleContentProperty::USE ||
216                 property == SubtitleContentProperty::X_OFFSET ||
217                 property == SubtitleContentProperty::Y_OFFSET ||
218                 property == SubtitleContentProperty::X_SCALE ||
219                 property == SubtitleContentProperty::FONTS ||
220                 property == VideoContentProperty::CROP ||
221                 property == VideoContentProperty::SCALE ||
222                 property == VideoContentProperty::FADE_IN ||
223                 property == VideoContentProperty::FADE_OUT
224                 ) {
225
226                 Changed (frequent);
227         }
228 }
229
230 void
231 Player::set_video_container_size (dcp::Size s)
232 {
233         if (s == _video_container_size) {
234                 return;
235         }
236
237         _video_container_size = s;
238
239         _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
240         _black_image->make_black ();
241
242         Changed (false);
243 }
244
245 void
246 Player::playlist_changed ()
247 {
248         _have_valid_pieces = false;
249         Changed (false);
250 }
251
252 void
253 Player::film_changed (Film::Property p)
254 {
255         /* Here we should notice Film properties that affect our output, and
256            alert listeners that our output now would be different to how it was
257            last time we were run.
258         */
259
260         if (p == Film::CONTAINER) {
261                 Changed (false);
262         } else if (p == Film::VIDEO_FRAME_RATE) {
263                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
264                    so we need new pieces here.
265                 */
266                 _have_valid_pieces = false;
267                 Changed (false);
268         } else if (p == Film::AUDIO_PROCESSOR) {
269                 if (_film->audio_processor ()) {
270                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
271                 }
272         }
273 }
274
275 list<PositionImage>
276 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
277 {
278         list<PositionImage> all;
279
280         for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
281                 if (!i->image) {
282                         continue;
283                 }
284
285                 /* We will scale the subtitle up to fit _video_container_size */
286                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
287
288                 /* Then we need a corrective translation, consisting of two parts:
289                  *
290                  * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
291                  *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
292                  *
293                  * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
294                  *     (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
295                  *     (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
296                  *
297                  * Combining these two translations gives these expressions.
298                  */
299
300                 all.push_back (
301                         PositionImage (
302                                 i->image->scale (
303                                         scaled_size,
304                                         dcp::YUV_TO_RGB_REC601,
305                                         i->image->pixel_format (),
306                                         true,
307                                         _fast
308                                         ),
309                                 Position<int> (
310                                         lrint (_video_container_size.width * i->rectangle.x),
311                                         lrint (_video_container_size.height * i->rectangle.y)
312                                         )
313                                 )
314                         );
315         }
316
317         return all;
318 }
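/* Editor's note (worked example, not in the original source): the subtitle rectangle is
   expressed as fractions of the video container.  With a 2048x1080 container and a rectangle
   of x = 0.25, y = 0.25, width = 0.5, height = 0.5, the code above scales the image to
   1024x540 and positions it at (512, 270); the YUV-to-RGB conversion is hard-coded to
   Rec. 601, as shown.
*/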
319
320 shared_ptr<PlayerVideo>
321 Player::black_player_video_frame () const
322 {
323         return shared_ptr<PlayerVideo> (
324                 new PlayerVideo (
325                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
326                         Crop (),
327                         optional<double> (),
328                         _video_container_size,
329                         _video_container_size,
330                         EYES_BOTH,
331                         PART_WHOLE,
332                         PresetColourConversion::all().front().conversion
333                 )
334         );
335 }
336
337 Frame
338 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
339 {
340         DCPTime s = t - piece->content->position ();
341         s = min (piece->content->length_after_trim(), s);
342         s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
343
344         /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
345            then convert that ContentTime to frames at the content's rate.  However this fails for
346            situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
347            enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
348
349            Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
350         */
351         return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
352 }
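/* Editor's sketch (illustrative addition, not part of the original source): the precision
   problem described above, shown with plain C++ arithmetic.  The 96000 "Time units per
   second" figure is an assumption inferred from the "3200 in Time units" example; nothing
   below is dcpomatic API.

       #include <cmath>
       #include <cstdint>

       int64_t const HZ = 96000;                                      // assumed time base
       int64_t const dcp_frame     = std::llround (HZ / 30.0);        // 3200
       int64_t const content_frame = std::llround (HZ / 29.9978733);  // also 3200

   Both rates quantise to the same integer number of time units per frame, so converting a
   small DCPTime to a ContentTime and then to frames cannot tell them apart; converting at
   the DCP rate and then dividing by the skip/repeat factor, as above, avoids the ambiguity.
*/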
353
354 DCPTime
355 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
356 {
357         /* See comment in dcp_to_content_video */
358         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
359         return max (DCPTime (), d + piece->content->position ());
360 }
361
362 Frame
363 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
364 {
365         DCPTime s = t - piece->content->position ();
366         s = min (piece->content->length_after_trim(), s);
367         /* See notes in dcp_to_content_video */
368         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
369 }
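/* Editor's note (worked example, not in the original source; assumes no frame-rate change
   between content and DCP): for content positioned at 10s with 0.5s trimmed from its start
   and a film audio rate of 48kHz, a DCP time of 12s gives s = 2s and the function returns
   (0.5s + 2s) * 48000 = 120000 resampled frames into the stream.
*/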
370
371 DCPTime
372 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
373 {
374         /* See comment in dcp_to_content_video */
375         DCPTime const d = DCPTime::from_frames (f, _film->audio_frame_rate()) - DCPTime (piece->content->trim_start(), piece->frc);
376         return max (DCPTime (), d + piece->content->position ());
377 }
378
379 ContentTime
380 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
381 {
382         DCPTime s = t - piece->content->position ();
383         s = min (piece->content->length_after_trim(), s);
384         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
385 }
386
387 DCPTime
388 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
389 {
390         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
391 }
392
393 list<shared_ptr<Font> >
394 Player::get_subtitle_fonts ()
395 {
396         if (!_have_valid_pieces) {
397                 setup_pieces ();
398         }
399
400         list<shared_ptr<Font> > fonts;
401         BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
402                 if (p->content->subtitle) {
403                         /* XXX: things may go wrong if there are duplicate font IDs
404                            with different font files.
405                         */
406                         list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
407                         copy (f.begin(), f.end(), back_inserter (fonts));
408                 }
409         }
410
411         return fonts;
412 }
413
414 /** Set this player never to produce any video data */
415 void
416 Player::set_ignore_video ()
417 {
418         _ignore_video = true;
419 }
420
421 /** Set whether or not this player should always burn text subtitles into the image,
422  *  regardless of the content settings.
423  *  @param burn true to always burn subtitles, false to obey content settings.
424  */
425 void
426 Player::set_always_burn_subtitles (bool burn)
427 {
428         _always_burn_subtitles = burn;
429 }
430
431 void
432 Player::set_fast ()
433 {
434         _fast = true;
435         _have_valid_pieces = false;
436 }
437
438 void
439 Player::set_play_referenced ()
440 {
441         _play_referenced = true;
442         _have_valid_pieces = false;
443 }
444
445 list<ReferencedReelAsset>
446 Player::get_reel_assets ()
447 {
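        /* Editor's note (descriptive comment added for clarity; not in the original source):
           for each DCP content item in the playlist, open it with a DCPDecoder and walk its
           reels.  Every asset type (picture, sound, subtitle) that the content says should be
           referenced rather than re-wrapped is trimmed by adjusting its entry point and
           duration, and is reported along with the DCP period it occupies, computed from the
           content's position plus the running frame offset of the reels seen so far.
        */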
448         list<ReferencedReelAsset> a;
449
450         BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
451                 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
452                 if (!j) {
453                         continue;
454                 }
455
456                 scoped_ptr<DCPDecoder> decoder;
457                 try {
458                         decoder.reset (new DCPDecoder (j, _film->log()));
459                 } catch (...) {
460                         return a;
461                 }
462
463                 int64_t offset = 0;
464                 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
465
466                         DCPOMATIC_ASSERT (j->video_frame_rate ());
467                         double const cfr = j->video_frame_rate().get();
468                         Frame const trim_start = j->trim_start().frames_round (cfr);
469                         Frame const trim_end = j->trim_end().frames_round (cfr);
470                         int const ffr = _film->video_frame_rate ();
471
472                         DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
473                         if (j->reference_video ()) {
474                                 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
475                                 DCPOMATIC_ASSERT (ra);
476                                 ra->set_entry_point (ra->entry_point() + trim_start);
477                                 ra->set_duration (ra->duration() - trim_start - trim_end);
478                                 a.push_back (
479                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
480                                         );
481                         }
482
483                         if (j->reference_audio ()) {
484                                 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
485                                 DCPOMATIC_ASSERT (ra);
486                                 ra->set_entry_point (ra->entry_point() + trim_start);
487                                 ra->set_duration (ra->duration() - trim_start - trim_end);
488                                 a.push_back (
489                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
490                                         );
491                         }
492
493                         if (j->reference_subtitle ()) {
494                                 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
495                                 DCPOMATIC_ASSERT (ra);
496                                 ra->set_entry_point (ra->entry_point() + trim_start);
497                                 ra->set_duration (ra->duration() - trim_start - trim_end);
498                                 a.push_back (
499                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
500                                         );
501                         }
502
503                         /* Assume that main picture duration is the length of the reel */
504                         offset += k->main_picture()->duration ();
505                 }
506         }
507
508         return a;
509 }
510
511 bool
512 Player::pass ()
513 {
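        /* Editor's note (descriptive comment added for clarity; not in the original source):
           one call to pass() advances emission by the smallest useful step.  It finds the
           piece whose decoder is earliest in DCP time and passes it once; emits a frame of
           black where there is a gap of at least one frame between the last emitted video
           (or the seek point, if we seeked into empty space) and the next thing that will
           happen, with a completely empty playlist getting a single black frame at time zero;
           then emits any merged audio that every active stream has already pushed past.  It
           returns true -- i.e. "we have finished" -- only when no decoder had anything left
           to do and no filling happened, which is the behaviour the commit summary describes.
        */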
514         if (!_have_valid_pieces) {
515                 setup_pieces ();
516         }
517
518         shared_ptr<Piece> earliest;
519         DCPTime earliest_content;
520
521         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
522                 if (!i->done) {
523                         DCPTime const t = i->content->position() + DCPTime (i->decoder->position(), i->frc);
524                         if (!earliest || t < earliest_content) {
525                                 earliest_content = t;
526                                 earliest = i;
527                         }
528                 }
529         }
530
531         if (earliest) {
532                 earliest->done = earliest->decoder->pass ();
533                 if (earliest->done && earliest->content->audio) {
534                         /* Flush the Player audio system for this piece */
535                         BOOST_FOREACH (AudioStreamPtr i, earliest->content->audio->streams()) {
536                                 audio_flush (earliest, i);
537                         }
538                 }
539         }
540
541         DCPTime fill_towards = earliest ? earliest_content : _playlist->length();
542
543         optional<DCPTime> fill_from;
544         if (_last_video_time) {
545                 /* No seek; fill towards the next thing that might happen (or the end of the playlist) */
546                 fill_from = _last_video_time;
547         } else if (_last_seek_time && !_playlist->video_content_at(_last_seek_time.get())) {
548                 /* Seek into an empty area; fill from the seek time */
549                 fill_from = _last_seek_time;
550         }
551
552         bool filled = false;
553         if (fill_from && (fill_towards - fill_from.get()) > one_video_frame()) {
554                 emit_video (black_player_video_frame(), fill_from.get());
555                 filled = true;
556         } else if (_playlist->length() == DCPTime()) {
557                 emit_video (black_player_video_frame(), DCPTime());
558                 filled = true;
559         }
560
561         if (!earliest && !filled) {
562                 return true;
563         }
564
565         /* Emit any audio that is ready */
566
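        /* Editor's note (added for clarity; not in the original source): only pull audio up to
           the earliest point that every still-active stream has pushed to, so the merger never
           emits a block that a lagging stream could still contribute samples to.
        */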
567         DCPTime pull_from = _playlist->length ();
568         for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
569                 if (!i->second.piece->done && i->second.last_push_end < pull_from) {
570                         pull_from = i->second.last_push_end;
571                 }
572         }
573
574         list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_from);
575         for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
576                 if (_last_audio_time && i->second < _last_audio_time.get()) {
577                         /* There has been an accurate seek and we have received some audio before the seek time;
578                            discard it.
579                         */
580                         pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
581                         if (!cut.first) {
582                                 continue;
583                         }
584                         *i = cut;
585                 }
586
587                 if (_last_audio_time) {
588                         fill_audio (DCPTimePeriod (_last_audio_time.get(), i->second));
589                 }
590
591                 Audio (i->first, i->second);
592                 _last_audio_time = i->second + DCPTime::from_frames(i->first->frames(), _film->audio_frame_rate());
593         }
594
595         return false;
596 }
597
598 optional<PositionImage>
599 Player::subtitles_for_frame (DCPTime time) const
600 {
601         list<PositionImage> subtitles;
602
603         BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
604
605                 /* Image subtitles */
606                 list<PositionImage> c = transform_image_subtitles (i.image);
607                 copy (c.begin(), c.end(), back_inserter (subtitles));
608
609                 /* Text subtitles (rendered to an image) */
610                 if (!i.text.empty ()) {
611                         list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
612                         copy (s.begin(), s.end(), back_inserter (subtitles));
613                 }
614         }
615
616         if (subtitles.empty ()) {
617                 return optional<PositionImage> ();
618         }
619
620         return merge (subtitles);
621 }
622
623 void
624 Player::video (weak_ptr<Piece> wp, ContentVideo video)
625 {
626         shared_ptr<Piece> piece = wp.lock ();
627         if (!piece) {
628                 return;
629         }
630
631         FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
632         if (frc.skip && (video.frame % 2) == 1) {
633                 return;
634         }
635
636         /* Time and period of the frame we will emit */
637         DCPTime const time = content_video_to_dcp (piece, video.frame);
638         DCPTimePeriod const period (time, time + one_video_frame());
639
640         /* Discard if it's outside the content's period or if it's before the last accurate seek */
641         if (
642                 time < piece->content->position() ||
643                 time >= piece->content->end() ||
644                 (_last_seek_time && _last_seek_accurate && time < _last_seek_time.get())) {
645                 return;
646         }
647
648         /* Fill gaps caused by (the hopefully rare event of) a decoder not emitting contiguous video.  We have to do this here
649            because in the problematic case we are about to emit a frame which is not contiguous with the previous one.
650         */
651
652         if (_last_video_time) {
653                 /* XXX: this may not work for 3D */
654                 BOOST_FOREACH (DCPTimePeriod i, subtract(DCPTimePeriod (_last_video_time.get(), time), _no_video)) {
655                         for (DCPTime j = i.from; j < i.to; j += one_video_frame()) {
656                                 if (_last_video) {
657                                         emit_video (shared_ptr<PlayerVideo> (new PlayerVideo (*_last_video)), j);
658                                 } else {
659                                         emit_video (black_player_video_frame(), j);
660                                 }
661                         }
662                 }
663         }
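        /* Editor's note (worked example, not in the original source; assumes no referenced-DCP
           period overlaps the gap): at 24fps, if _last_video_time is 2.0s and this frame is due
           at 2.5s, the loop above emits twelve filler frames (repeats of the last frame, or
           black if there is none) at 2.0s, 2.0417s, ... before the new frame is emitted below.
        */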
664
665         _last_video.reset (
666                 new PlayerVideo (
667                         video.image,
668                         piece->content->video->crop (),
669                         piece->content->video->fade (video.frame),
670                         piece->content->video->scale().size (
671                                 piece->content->video, _video_container_size, _film->frame_size ()
672                                 ),
673                         _video_container_size,
674                         video.eyes,
675                         video.part,
676                         piece->content->video->colour_conversion ()
677                         )
678                 );
679
680         emit_video (_last_video, time);
681 }
682
683 void
684 Player::audio_flush (shared_ptr<Piece> piece, AudioStreamPtr stream)
685 {
686         shared_ptr<AudioContent> content = piece->content->audio;
687         DCPOMATIC_ASSERT (content);
688
689         shared_ptr<Resampler> r = resampler (content, stream, false);
690         if (!r) {
691                 return;
692         }
693
694         pair<shared_ptr<const AudioBuffers>, Frame> ro = r->flush ();
695         if (ro.first->frames() == 0) {
696                 return;
697         }
698
699         ContentAudio content_audio;
700         content_audio.audio = ro.first;
701         content_audio.frame = ro.second;
702
703         /* Compute time in the DCP */
704         DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
705
706         audio_transform (content, stream, content_audio, time);
707 }
708
709 /** Do our common processing on some audio */
710 void
711 Player::audio_transform (shared_ptr<AudioContent> content, AudioStreamPtr stream, ContentAudio content_audio, DCPTime time)
712 {
713         DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
714
715         /* Gain */
716
717         if (content->gain() != 0) {
718                 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
719                 gain->apply_gain (content->gain ());
720                 content_audio.audio = gain;
721         }
722
723         /* Remap */
724
725         shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), content_audio.audio->frames()));
726         dcp_mapped->make_silent ();
727
728         AudioMapping map = stream->mapping ();
729         for (int i = 0; i < map.input_channels(); ++i) {
730                 for (int j = 0; j < dcp_mapped->channels(); ++j) {
731                         if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
732                                 dcp_mapped->accumulate_channel (
733                                         content_audio.audio.get(),
734                                         i,
735                                         static_cast<dcp::Channel> (j),
736                                         map.get (i, static_cast<dcp::Channel> (j))
737                                         );
738                         }
739                 }
740         }
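        /* Editor's note (worked example, not in the original source; channel names are
           illustrative): with a stereo stream mapped into a 5.1 film, a mapping whose only
           non-zero entries are get(0, L) = 1 and get(1, R) = 1 copies input channel 0 to DCP
           channel L and input channel 1 to DCP channel R at unity gain; every other DCP
           channel keeps the silence written by make_silent() above.
        */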
741
742         content_audio.audio = dcp_mapped;
743
744         /* Process */
745
746         if (_audio_processor) {
747                 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
748         }
749
750         /* Push */
751
752         _audio_merger.push (content_audio.audio, time);
753         DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
754         _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
755 }
756
757 void
758 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
759 {
760         DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
761
762         shared_ptr<Piece> piece = wp.lock ();
763         if (!piece) {
764                 return;
765         }
766
767         shared_ptr<AudioContent> content = piece->content->audio;
768         DCPOMATIC_ASSERT (content);
769
770         /* Resample */
771         if (stream->frame_rate() != content->resampled_frame_rate()) {
772                 shared_ptr<Resampler> r = resampler (content, stream, true);
773                 pair<shared_ptr<const AudioBuffers>, Frame> ro = r->run (content_audio.audio, content_audio.frame);
774                 if (ro.first->frames() == 0) {
775                         return;
776                 }
777                 content_audio.audio = ro.first;
778                 content_audio.frame = ro.second;
779         }
780
781         /* Compute time in the DCP */
782         DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
783         /* And the end of this block in the DCP */
784         DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
785
786         /* Remove anything that comes before the start or after the end of the content */
787         if (time < piece->content->position()) {
788                 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
789                 if (!cut.first) {
790                         /* This audio is entirely discarded */
791                         return;
792                 }
793                 content_audio.audio = cut.first;
794                 time = cut.second;
795         } else if (time > piece->content->end()) {
796                 /* Discard it all */
797                 return;
798         } else if (end > piece->content->end()) {
799                 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
800                 if (remaining_frames == 0) {
801                         return;
802                 }
803                 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
804                 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
805                 content_audio.audio = cut;
806         }
807
808         audio_transform (content, stream, content_audio, time);
809 }
810
811 void
812 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
813 {
814         shared_ptr<Piece> piece = wp.lock ();
815         if (!piece) {
816                 return;
817         }
818
819         /* Apply content's subtitle offsets */
820         subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
821         subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
822
823         /* Apply content's subtitle scale */
824         subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
825         subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
826
827         /* Apply a corrective translation to keep the subtitle centred after that scale */
828         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
829         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
830
831         PlayerSubtitles ps;
832         ps.image.push_back (subtitle.sub);
833         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
834
835         _active_subtitles.add_from (wp, ps, from);
836 }
837
838 void
839 Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
840 {
841         shared_ptr<Piece> piece = wp.lock ();
842         if (!piece) {
843                 return;
844         }
845
846         PlayerSubtitles ps;
847         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
848
849         BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
850                 s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
851                 s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
852                 float const xs = piece->content->subtitle->x_scale();
853                 float const ys = piece->content->subtitle->y_scale();
854                 float size = s.size();
855
856                 /* Adjust size to express the common part of the scaling; the expression
857                    below multiplies by max(xs, ys), e.g. if xs = ys = 0.5 the size is halved.
858                 */
859                 if (xs > 1e-5 && ys > 1e-5) {
860                         size *= 1 / min (1 / xs, 1 / ys);
861                 }
862                 s.set_size (size);
863
864                 /* Then express aspect ratio changes */
865                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
866                         s.set_aspect_adjust (xs / ys);
867                 }
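                /* Editor's note (worked example, not in the original source): with x_scale 1.2
                   and y_scale 1.0 the size is multiplied by 1.2 and aspect_adjust is set to
                   1.2 / 1.0 = 1.2; with x_scale = y_scale = 0.8 the size is multiplied by 0.8
                   and no aspect adjustment is made.
                */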
868
869                 s.set_in (dcp::Time(from.seconds(), 1000));
870                 ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
871                 ps.add_fonts (piece->content->subtitle->fonts ());
872         }
873
874         _active_subtitles.add_from (wp, ps, from);
875 }
876
877 void
878 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
879 {
880         if (!_active_subtitles.have (wp)) {
881                 return;
882         }
883
884         shared_ptr<Piece> piece = wp.lock ();
885         if (!piece) {
886                 return;
887         }
888
889         DCPTime const dcp_to = content_time_to_dcp (piece, to);
890
891         pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
892
893         if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
894                 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
895         }
896 }
897
898 void
899 Player::seek (DCPTime time, bool accurate)
900 {
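        /* Editor's note (descriptive comment added for clarity; not in the original source):
           flush the audio processor and resamplers, drop any merged-but-unsent audio and any
           active subtitles, then seek each piece: to its own start if it begins after the seek
           time, to the corresponding content time if the seek lands inside it, or mark it done
           if it has already finished.  Finally forget the last video/audio times and record the
           seek position and whether it was accurate, which pass() and video() use to decide
           what to fill and what to discard.
        */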
901         if (_audio_processor) {
902                 _audio_processor->flush ();
903         }
904
905         for (ResamplerMap::iterator i = _resamplers.begin(); i != _resamplers.end(); ++i) {
906                 i->second->flush ();
907                 i->second->reset ();
908         }
909
910         _audio_merger.clear ();
911         _active_subtitles.clear ();
912
913         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
914                 if (time < i->content->position()) {
915                         /* Before; seek to 0 */
916                         i->decoder->seek (ContentTime(), accurate);
917                         i->done = false;
918                 } else if (i->content->position() <= time && time < i->content->end()) {
919                         /* During; seek to position */
920                         i->decoder->seek (dcp_to_content_time (i, time), accurate);
921                         i->done = false;
922                 } else {
923                         /* After; this piece is done */
924                         i->done = true;
925                 }
926         }
927
928         _last_video_time = optional<DCPTime> ();
929         _last_audio_time = optional<DCPTime> ();
930         _last_seek_time = time;
931         _last_seek_accurate = accurate;
932 }
933
934 shared_ptr<Resampler>
935 Player::resampler (shared_ptr<const AudioContent> content, AudioStreamPtr stream, bool create)
936 {
937         ResamplerMap::const_iterator i = _resamplers.find (make_pair (content, stream));
938         if (i != _resamplers.end ()) {
939                 return i->second;
940         }
941
942         if (!create) {
943                 return shared_ptr<Resampler> ();
944         }
945
946         LOG_GENERAL (
947                 "Creating new resampler from %1 to %2 with %3 channels",
948                 stream->frame_rate(),
949                 content->resampled_frame_rate(),
950                 stream->channels()
951                 );
952
953         shared_ptr<Resampler> r (
954                 new Resampler (stream->frame_rate(), content->resampled_frame_rate(), stream->channels())
955                 );
956
957         _resamplers[make_pair(content, stream)] = r;
958         return r;
959 }
960
961 void
962 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
963 {
964         optional<PositionImage> subtitles = subtitles_for_frame (time);
965         if (subtitles) {
966                 pv->set_subtitle (subtitles.get ());
967         }
968         Video (pv, time);
969         _last_video_time = time + one_video_frame();
970         _active_subtitles.clear_before (time);
971 }
972
973 void
974 Player::fill_audio (DCPTimePeriod period)
975 {
976         BOOST_FOREACH (DCPTimePeriod i, subtract(period, _no_audio)) {
977                 DCPTime t = i.from;
978                 while (t < i.to) {
979                         DCPTime block = min (DCPTime::from_seconds (0.5), i.to - t);
980                         Frame const samples = block.frames_round(_film->audio_frame_rate());
981                         if (samples) {
982                                 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
983                                 silence->make_silent ();
984                                 Audio (silence, t);
985                         }
986                         t += block;
987                 }
988         }
989 }
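/* Editor's note (worked example, not in the original source): a 1.3s gap at 48kHz is emitted
   by fill_audio() as three blocks of silence of 0.5s, 0.5s and 0.3s (24000, 24000 and 14400
   frames), skipping any sub-periods covered by referenced DCP audio.
*/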
990
991 DCPTime
992 Player::one_video_frame () const
993 {
994         return DCPTime::from_frames (1, _film->video_frame_rate ());
995 }
996
997 pair<shared_ptr<AudioBuffers>, DCPTime>
998 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
999 {
1000         DCPTime const discard_time = discard_to - time;
1001         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1002         Frame remaining_frames = audio->frames() - discard_frames;
1003         if (remaining_frames <= 0) {
1004                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1005         }
1006         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1007         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1008         return make_pair(cut, time + discard_time);
1009 }
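/* Editor's note (worked example, not in the original source): discard_audio() is the helper
   used above to drop the part of an audio block that falls before a cut-off.  At 48kHz, a
   block of 48000 frames starting at time t with discard_to = t + 0.25s loses its first 12000
   frames; the returned buffer holds the remaining 36000 frames and is timestamped t + 0.25s.
   If the whole block would be discarded, a null buffer is returned.
*/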