Fix up black-filling logic.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_subtitles.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "resampler.h"
51 #include "compose.hpp"
52 #include <dcp/reel.h>
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <boost/foreach.hpp>
57 #include <stdint.h>
58 #include <algorithm>
59 #include <iostream>
60
61 #include "i18n.h"
62
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
64
65 using std::list;
66 using std::cout;
67 using std::min;
68 using std::max;
69 using std::min;
70 using std::vector;
71 using std::pair;
72 using std::map;
73 using std::make_pair;
74 using std::copy;
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
80
/** Construct a Player for @p playlist in the context of @p film.
 *  Subscribes to film/playlist change signals, sizes the output container
 *  to the film's frame size, sets up the audio processor (if any) and
 *  performs an initial accurate seek to time zero.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _have_valid_pieces (false)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _always_burn_subtitles (false)
	, _fast (false)
	, _play_referenced (false)
	, _last_seek_accurate (true)
	, _audio_merger (_film->audio_frame_rate())
{
	/* Watch for changes that may invalidate our pieces or our output */
	_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
	_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
	_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
	set_video_container_size (_film->frame_size ());

	/* Set up _audio_processor from the film's current setting */
	film_changed (Film::AUDIO_PROCESSOR);

	/* Start playback state from the beginning, with an accurate seek */
	seek (DCPTime (), true);
}
102
/** Rebuild _pieces from the playlist: one Piece per decodable content item,
 *  with the decoder's output signals wired back into this Player.  Also
 *  records per-stream audio state and the periods for which no video/audio
 *  will be produced because a referenced DCP supplies them.
 */
void
Player::setup_pieces ()
{
	_pieces.clear ();

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

		/* Skip content whose files are missing */
		if (!i->paths_valid ()) {
			continue;
		}

		shared_ptr<Decoder> decoder = decoder_factory (i, _film->log());
		FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());

		if (!decoder) {
			/* Not something that we can decode; e.g. Atmos content */
			continue;
		}

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore ();
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore ();
		}

		/* When playing referenced content we must actually decode the parts
		   of a DCP that would otherwise be passed through by reference.
		*/
		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp && _play_referenced) {
			dcp->set_decode_referenced ();
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		/* Connect decoder outputs to our handlers; weak_ptr so the
		   connections do not keep the Piece alive.
		*/
		if (decoder->video) {
			decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		if (decoder->subtitle) {
			decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
			decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
			decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
		}
	}

	/* Record, per audio stream, the piece it belongs to and where its pushed audio ends */
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Note the periods covered by referenced DCP video/audio, during which we
	   should not expect to emit anything ourselves.
	*/
	if (!_play_referenced) {
		BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
			shared_ptr<DCPContent> dc = dynamic_pointer_cast<DCPContent> (i->content);
			if (dc) {
				if (dc->reference_video()) {
					_no_video.push_back (DCPTimePeriod (dc->position(), dc->end()));
				}
				if (dc->reference_audio()) {
					_no_audio.push_back (DCPTimePeriod (dc->position(), dc->end()));
				}
			}
		}
	}

	/* Reset emission state; we have not produced anything from these new pieces yet */
	_last_video_time = optional<DCPTime> ();
	_last_audio_time = optional<DCPTime> ();
	_have_valid_pieces = true;
}
179
/** Handler for a property change on some piece of playlist content.
 *  Properties fall into two groups: those that require the pieces to be
 *  rebuilt (and listeners told), and those that only change the rendered
 *  output (listeners told, pieces kept).
 *  @param w weak pointer to the changed content.
 *  @param property the property that changed.
 *  @param frequent true if this change is likely to come thick-and-fast (passed on to Changed).
 */
void
Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
{
	shared_ptr<Content> c = w.lock ();
	if (!c) {
		return;
	}

	/* These properties invalidate the Piece list (timing, decoders or
	   decoder parameters change), so force a setup_pieces on next use.
	*/
	if (
		property == ContentProperty::POSITION ||
		property == ContentProperty::LENGTH ||
		property == ContentProperty::TRIM_START ||
		property == ContentProperty::TRIM_END ||
		property == ContentProperty::PATH ||
		property == VideoContentProperty::FRAME_TYPE ||
		property == DCPContentProperty::NEEDS_ASSETS ||
		property == DCPContentProperty::NEEDS_KDM ||
		property == SubtitleContentProperty::COLOUR ||
		property == SubtitleContentProperty::OUTLINE ||
		property == SubtitleContentProperty::SHADOW ||
		property == SubtitleContentProperty::EFFECT_COLOUR ||
		property == FFmpegContentProperty::SUBTITLE_STREAM ||
		property == VideoContentProperty::COLOUR_CONVERSION
		) {

		_have_valid_pieces = false;
		Changed (frequent);

	/* These properties only affect how existing pieces are rendered, so
	   the pieces themselves can stay.
	*/
	} else if (
		property == SubtitleContentProperty::LINE_SPACING ||
		property == SubtitleContentProperty::OUTLINE_WIDTH ||
		property == SubtitleContentProperty::Y_SCALE ||
		property == SubtitleContentProperty::FADE_IN ||
		property == SubtitleContentProperty::FADE_OUT ||
		property == ContentProperty::VIDEO_FRAME_RATE ||
		property == SubtitleContentProperty::USE ||
		property == SubtitleContentProperty::X_OFFSET ||
		property == SubtitleContentProperty::Y_OFFSET ||
		property == SubtitleContentProperty::X_SCALE ||
		property == SubtitleContentProperty::FONTS ||
		property == VideoContentProperty::CROP ||
		property == VideoContentProperty::SCALE ||
		property == VideoContentProperty::FADE_IN ||
		property == VideoContentProperty::FADE_OUT
		) {

		Changed (frequent);
	}
}
229
230 void
231 Player::set_video_container_size (dcp::Size s)
232 {
233         if (s == _video_container_size) {
234                 return;
235         }
236
237         _video_container_size = s;
238
239         _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
240         _black_image->make_black ();
241
242         Changed (false);
243 }
244
/** Handler for wholesale changes to the playlist: invalidate our pieces
 *  and tell listeners that our output would now be different.
 */
void
Player::playlist_changed ()
{
	_have_valid_pieces = false;
	Changed (false);
}
251
252 void
253 Player::film_changed (Film::Property p)
254 {
255         /* Here we should notice Film properties that affect our output, and
256            alert listeners that our output now would be different to how it was
257            last time we were run.
258         */
259
260         if (p == Film::CONTAINER) {
261                 Changed (false);
262         } else if (p == Film::VIDEO_FRAME_RATE) {
263                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
264                    so we need new pieces here.
265                 */
266                 _have_valid_pieces = false;
267                 Changed (false);
268         } else if (p == Film::AUDIO_PROCESSOR) {
269                 if (_film->audio_processor ()) {
270                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
271                 }
272         }
273 }
274
275 list<PositionImage>
276 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
277 {
278         list<PositionImage> all;
279
280         for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
281                 if (!i->image) {
282                         continue;
283                 }
284
285                 /* We will scale the subtitle up to fit _video_container_size */
286                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
287
288                 /* Then we need a corrective translation, consisting of two parts:
289                  *
290                  * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
291                  *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
292                  *
293                  * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
294                  *     (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
295                  *     (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
296                  *
297                  * Combining these two translations gives these expressions.
298                  */
299
300                 all.push_back (
301                         PositionImage (
302                                 i->image->scale (
303                                         scaled_size,
304                                         dcp::YUV_TO_RGB_REC601,
305                                         i->image->pixel_format (),
306                                         true,
307                                         _fast
308                                         ),
309                                 Position<int> (
310                                         lrint (_video_container_size.width * i->rectangle.x),
311                                         lrint (_video_container_size.height * i->rectangle.y)
312                                         )
313                                 )
314                         );
315         }
316
317         return all;
318 }
319
320 shared_ptr<PlayerVideo>
321 Player::black_player_video_frame () const
322 {
323         return shared_ptr<PlayerVideo> (
324                 new PlayerVideo (
325                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
326                         Crop (),
327                         optional<double> (),
328                         _video_container_size,
329                         _video_container_size,
330                         EYES_BOTH,
331                         PART_WHOLE,
332                         PresetColourConversion::all().front().conversion
333                 )
334         );
335 }
336
/** Convert a DCP time to a frame index within some piece's video content.
 *  @param piece piece to convert for.
 *  @param t DCP time.
 *  @return frame index in the content, accounting for position, trim and
 *  any frame skip/repeat.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	/* Offset into the piece, clamped to its trimmed length, with the start trim added back */
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
353
354 DCPTime
355 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
356 {
357         /* See comment in dcp_to_content_video */
358         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
359         return max (DCPTime (), d + piece->content->position ());
360 }
361
362 Frame
363 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
364 {
365         DCPTime s = t - piece->content->position ();
366         s = min (piece->content->length_after_trim(), s);
367         /* See notes in dcp_to_content_video */
368         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
369 }
370
371 DCPTime
372 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
373 {
374         /* See comment in dcp_to_content_video */
375         DCPTime const d = DCPTime::from_frames (f, _film->audio_frame_rate()) - DCPTime (piece->content->trim_start(), piece->frc);
376         return max (DCPTime (), d + piece->content->position ());
377 }
378
379 ContentTime
380 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
381 {
382         DCPTime s = t - piece->content->position ();
383         s = min (piece->content->length_after_trim(), s);
384         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
385 }
386
387 DCPTime
388 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
389 {
390         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
391 }
392
393 list<shared_ptr<Font> >
394 Player::get_subtitle_fonts ()
395 {
396         if (!_have_valid_pieces) {
397                 setup_pieces ();
398         }
399
400         list<shared_ptr<Font> > fonts;
401         BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
402                 if (p->content->subtitle) {
403                         /* XXX: things may go wrong if there are duplicate font IDs
404                            with different font files.
405                         */
406                         list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
407                         copy (f.begin(), f.end(), back_inserter (fonts));
408                 }
409         }
410
411         return fonts;
412 }
413
/** Set this player never to produce any video data.
 *  Takes effect when pieces are (re)built; presumably callers use this
 *  before the first pass/seek — TODO confirm.
 */
void
Player::set_ignore_video ()
{
	_ignore_video = true;
}
420
/** Set whether or not this player should always burn text subtitles into the image,
 *  regardless of the content settings.
 *  @param burn true to always burn subtitles, false to obey content settings.
 *  Used by subtitles_for_frame() when collecting burnt-in subtitles.
 */
void
Player::set_always_burn_subtitles (bool burn)
{
	_always_burn_subtitles = burn;
}
430
431 void
432 Player::set_fast ()
433 {
434         _fast = true;
435         _have_valid_pieces = false;
436 }
437
438 void
439 Player::set_play_referenced ()
440 {
441         _play_referenced = true;
442         _have_valid_pieces = false;
443 }
444
/** Collect the reel assets (picture/sound/subtitle) that this playlist
 *  references from existing DCPs, with trims applied and each asset paired
 *  with the DCP-time period it occupies in our timeline.
 *  @return referenced reel assets; empty or partial if a DCP could not be read.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
		/* Only DCP content can reference reel assets */
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (j, _film->log()));
		} catch (...) {
			/* NOTE(review): any failure to open the DCP silently returns the
			   assets gathered so far — confirm this truncation is intended.
			*/
			return a;
		}

		/* Offset, in whole-reel frames, of the current reel from the start of the DCP */
		int64_t offset = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			DCPOMATIC_ASSERT (j->video_frame_rate ());
			double const cfr = j->video_frame_rate().get();
			Frame const trim_start = j->trim_start().frames_round (cfr);
			Frame const trim_end = j->trim_end().frames_round (cfr);
			int const ffr = _film->video_frame_rate ();

			/* Where this reel starts on our timeline */
			DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
			if (j->reference_video ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
				DCPOMATIC_ASSERT (ra);
				/* Apply the content's trims to the asset's entry point / duration.
				   NOTE(review): this mutates the decoder's asset in place; the
				   decoder is local so presumably that is safe — confirm.
				*/
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_audio ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_subtitle ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			/* Assume that main picture duration is the length of the reel */
			offset += k->main_picture()->duration ();
		}
	}

	return a;
}
510
/** Run one step of playback: pass the piece whose decoder is earliest in
 *  DCP time, fill any video gap with black, then emit whatever audio has
 *  become ready from the merger.
 *  @return true if there is nothing left to do (no piece to pass and no
 *  filling in progress), false otherwise.
 */
bool
Player::pass ()
{
	if (!_have_valid_pieces) {
		setup_pieces ();
	}

	/* Find the not-yet-finished piece whose decoder position is earliest in DCP time */
	shared_ptr<Piece> earliest;
	DCPTime earliest_content;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (!i->done) {
			DCPTime const t = i->content->position() + DCPTime (i->decoder->position(), i->frc);
			if (!earliest || t < earliest_content) {
				earliest_content = t;
				earliest = i;
			}
		}
	}

	if (earliest) {
		earliest->done = earliest->decoder->pass ();
		if (earliest->done && earliest->content->audio) {
			/* Flush the Player audio system for this piece */
			BOOST_FOREACH (AudioStreamPtr i, earliest->content->audio->streams()) {
				audio_flush (earliest, i);
			}
		}
	}

	/* Black-filling: fill up to the earliest remaining content, or to the end
	   of the playlist if everything is done.
	*/
	DCPTime fill_towards = earliest ? earliest_content : _playlist->length();

	optional<DCPTime> fill_from;
	if (_last_video_time) {
		/* No seek; fill towards the next thing that might happen (or the end of the playlist) */
		fill_from = _last_video_time;
	} else if (_last_seek_time && !_playlist->video_content_at(_last_seek_time.get())) {
		/* Seek into an empty area; fill from the seek time */
		fill_from = _last_seek_time;
	}

	/* NOTE(review): only one black frame is emitted per pass() here, so a long
	   gap is filled over several passes — confirm this is the intended pacing.
	*/
	if (fill_from && ((fill_towards - fill_from.get())) > one_video_frame()) {
		emit_video (black_player_video_frame(), fill_from.get());
	} else if (_playlist->length() == DCPTime()) {
		/* Special case of an empty playlist; just give one black frame */
		emit_video (black_player_video_frame(), DCPTime());
	}

	if (!earliest && !fill_from) {
		/* Nothing to decode and nothing to fill: we are finished */
		return true;
	}

	/* Emit any audio that is ready */

	/* We can safely emit audio up to the earliest point that any live stream
	   has pushed to; anything before that cannot change.
	*/
	DCPTime pull_from = _playlist->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_from) {
			pull_from = i->second.last_push_end;
		}
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_from);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < _last_audio_time.get()) {
			/* There has been an accurate seek and we have received some audio before the seek time;
			   discard it.
			*/
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		}

		if (_last_audio_time) {
			/* Pad any gap between the last audio we emitted and this block with silence */
			fill_audio (DCPTimePeriod (_last_audio_time.get(), i->second));
		}

		Audio (i->first, i->second);
		_last_audio_time = i->second + DCPTime::from_frames(i->first->frames(), _film->audio_frame_rate());
	}

	return false;
}
594
595 optional<PositionImage>
596 Player::subtitles_for_frame (DCPTime time) const
597 {
598         list<PositionImage> subtitles;
599
600         BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
601
602                 /* Image subtitles */
603                 list<PositionImage> c = transform_image_subtitles (i.image);
604                 copy (c.begin(), c.end(), back_inserter (subtitles));
605
606                 /* Text subtitles (rendered to an image) */
607                 if (!i.text.empty ()) {
608                         list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
609                         copy (s.begin(), s.end(), back_inserter (subtitles));
610                 }
611         }
612
613         if (subtitles.empty ()) {
614                 return optional<PositionImage> ();
615         }
616
617         return merge (subtitles);
618 }
619
/** Handler for video data arriving from a piece's decoder.  Converts the
 *  content frame to DCP time, discards out-of-range frames, fills any gap
 *  since the last emitted frame, then emits this frame.
 *  @param wp weak pointer to the originating Piece.
 *  @param video the decoded video frame.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	/* When skipping (content rate ~2x DCP rate) drop every other frame */
	FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
	if (frc.skip && (video.frame % 2) == 1) {
		return;
	}

	/* Time and period of the frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	DCPTimePeriod const period (time, time + one_video_frame());

	/* Discard if it's outside the content's period or if it's before the last accurate seek */
	if (
		time < piece->content->position() ||
		time >= piece->content->end() ||
		(_last_seek_time && _last_seek_accurate && time < _last_seek_time.get())) {
		return;
	}

	/* Fill gaps caused by (the hopefully rare event of) a decoder not emitting contiguous video.  We have to do this here
	   as in the problematic case we are about to emit a frame which is not contiguous with the previous.
	*/

	if (_last_video_time) {
		/* XXX: this may not work for 3D */
		/* Fill the gap, skipping periods covered by referenced DCP video;
		   repeat the last frame if we have one, otherwise use black.
		*/
		BOOST_FOREACH (DCPTimePeriod i, subtract(DCPTimePeriod (_last_video_time.get(), time), _no_video)) {
			for (DCPTime j = i.from; j < i.to; j += one_video_frame()) {
				if (_last_video) {
					emit_video (shared_ptr<PlayerVideo> (new PlayerVideo (*_last_video)), j);
				} else {
					emit_video (black_player_video_frame(), j);
				}
			}
		}
	}

	/* Build the frame to emit, applying crop, fade, scale and colour conversion */
	_last_video.reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion ()
			)
		);

	emit_video (_last_video, time);
}
679
/** Flush any audio remaining in the resampler for @p stream of @p piece
 *  and push it through the normal audio processing path.  A no-op if the
 *  stream has no resampler or the resampler has nothing left.
 */
void
Player::audio_flush (shared_ptr<Piece> piece, AudioStreamPtr stream)
{
	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Look up an existing resampler only; do not create one just to flush it */
	shared_ptr<Resampler> r = resampler (content, stream, false);
	if (!r) {
		return;
	}

	pair<shared_ptr<const AudioBuffers>, Frame> ro = r->flush ();
	if (ro.first->frames() == 0) {
		return;
	}

	ContentAudio content_audio;
	content_audio.audio = ro.first;
	content_audio.frame = ro.second;

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);

	audio_transform (content, stream, content_audio, time);
}
705
/** Do our common processing on some audio: apply the content's gain, remap
 *  its channels into the DCP channel layout, run the film's audio processor
 *  (if any) and push the result into the merger.
 *  @param content audio content the data came from.
 *  @param stream stream the data came from (provides the channel mapping).
 *  @param content_audio the audio data; must be non-empty.
 *  @param time DCP time of the start of the data.
 */
void
Player::audio_transform (shared_ptr<AudioContent> content, AudioStreamPtr stream, ContentAudio content_audio, DCPTime time)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy before applying gain so we do not modify the decoder's buffers */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap */

	shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), content_audio.audio->frames()));
	dcp_mapped->make_silent ();

	/* Mix each input channel into each DCP channel according to the stream's mapping */
	AudioMapping map = stream->mapping ();
	for (int i = 0; i < map.input_channels(); ++i) {
		for (int j = 0; j < dcp_mapped->channels(); ++j) {
			if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
				dcp_mapped->accumulate_channel (
					content_audio.audio.get(),
					i,
					static_cast<dcp::Channel> (j),
					map.get (i, static_cast<dcp::Channel> (j))
					);
			}
		}
	}

	content_audio.audio = dcp_mapped;

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	/* Record where this stream's pushed audio now ends, for pass() to pull up to */
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
753
754 void
755 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
756 {
757         DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
758
759         shared_ptr<Piece> piece = wp.lock ();
760         if (!piece) {
761                 return;
762         }
763
764         shared_ptr<AudioContent> content = piece->content->audio;
765         DCPOMATIC_ASSERT (content);
766
767         /* Resample */
768         if (stream->frame_rate() != content->resampled_frame_rate()) {
769                 shared_ptr<Resampler> r = resampler (content, stream, true);
770                 pair<shared_ptr<const AudioBuffers>, Frame> ro = r->run (content_audio.audio, content_audio.frame);
771                 if (ro.first->frames() == 0) {
772                         return;
773                 }
774                 content_audio.audio = ro.first;
775                 content_audio.frame = ro.second;
776         }
777
778         /* Compute time in the DCP */
779         DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
780         /* And the end of this block in the DCP */
781         DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
782
783         /* Remove anything that comes before the start or after the end of the content */
784         if (time < piece->content->position()) {
785                 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
786                 if (!cut.first) {
787                         /* This audio is entirely discarded */
788                         return;
789                 }
790                 content_audio.audio = cut.first;
791                 time = cut.second;
792         } else if (time > piece->content->end()) {
793                 /* Discard it all */
794                 return;
795         } else if (end > piece->content->end()) {
796                 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
797                 DCPOMATIC_ASSERT (remaining_frames > 0);
798                 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
799                 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
800                 content_audio.audio = cut;
801         }
802
803         audio_transform (content, stream, content_audio, time);
804 }
805
806 void
807 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
808 {
809         shared_ptr<Piece> piece = wp.lock ();
810         if (!piece) {
811                 return;
812         }
813
814         /* Apply content's subtitle offsets */
815         subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
816         subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
817
818         /* Apply content's subtitle scale */
819         subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
820         subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
821
822         /* Apply a corrective translation to keep the subtitle centred after that scale */
823         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
824         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
825
826         PlayerSubtitles ps;
827         ps.image.push_back (subtitle.sub);
828         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
829
830         _active_subtitles.add_from (wp, ps, from);
831 }
832
833 void
834 Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
835 {
836         shared_ptr<Piece> piece = wp.lock ();
837         if (!piece) {
838                 return;
839         }
840
841         PlayerSubtitles ps;
842         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
843
844         BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
845                 s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
846                 s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
847                 float const xs = piece->content->subtitle->x_scale();
848                 float const ys = piece->content->subtitle->y_scale();
849                 float size = s.size();
850
851                 /* Adjust size to express the common part of the scaling;
852                    e.g. if xs = ys = 0.5 we scale size by 2.
853                 */
854                 if (xs > 1e-5 && ys > 1e-5) {
855                         size *= 1 / min (1 / xs, 1 / ys);
856                 }
857                 s.set_size (size);
858
859                 /* Then express aspect ratio changes */
860                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
861                         s.set_aspect_adjust (xs / ys);
862                 }
863
864                 s.set_in (dcp::Time(from.seconds(), 1000));
865                 ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
866                 ps.add_fonts (piece->content->subtitle->fonts ());
867         }
868
869         _active_subtitles.add_from (wp, ps, from);
870 }
871
872 void
873 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
874 {
875         if (!_active_subtitles.have (wp)) {
876                 return;
877         }
878
879         shared_ptr<Piece> piece = wp.lock ();
880         if (!piece) {
881                 return;
882         }
883
884         DCPTime const dcp_to = content_time_to_dcp (piece, to);
885
886         pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
887
888         if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
889                 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
890         }
891 }
892
893 void
894 Player::seek (DCPTime time, bool accurate)
895 {
896         if (_audio_processor) {
897                 _audio_processor->flush ();
898         }
899
900         for (ResamplerMap::iterator i = _resamplers.begin(); i != _resamplers.end(); ++i) {
901                 i->second->flush ();
902                 i->second->reset ();
903         }
904
905         _audio_merger.clear ();
906         _active_subtitles.clear ();
907
908         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
909                 if (time < i->content->position()) {
910                         /* Before; seek to 0 */
911                         i->decoder->seek (ContentTime(), accurate);
912                         i->done = false;
913                 } else if (i->content->position() <= time && time < i->content->end()) {
914                         /* During; seek to position */
915                         i->decoder->seek (dcp_to_content_time (i, time), accurate);
916                         i->done = false;
917                 } else {
918                         /* After; this piece is done */
919                         i->done = true;
920                 }
921         }
922
923         _last_video_time = optional<DCPTime> ();
924         _last_audio_time = optional<DCPTime> ();
925         _last_seek_time = time;
926         _last_seek_accurate = accurate;
927 }
928
929 shared_ptr<Resampler>
930 Player::resampler (shared_ptr<const AudioContent> content, AudioStreamPtr stream, bool create)
931 {
932         ResamplerMap::const_iterator i = _resamplers.find (make_pair (content, stream));
933         if (i != _resamplers.end ()) {
934                 return i->second;
935         }
936
937         if (!create) {
938                 return shared_ptr<Resampler> ();
939         }
940
941         LOG_GENERAL (
942                 "Creating new resampler from %1 to %2 with %3 channels",
943                 stream->frame_rate(),
944                 content->resampled_frame_rate(),
945                 stream->channels()
946                 );
947
948         shared_ptr<Resampler> r (
949                 new Resampler (stream->frame_rate(), content->resampled_frame_rate(), stream->channels())
950                 );
951
952         _resamplers[make_pair(content, stream)] = r;
953         return r;
954 }
955
956 void
957 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
958 {
959         optional<PositionImage> subtitles = subtitles_for_frame (time);
960         if (subtitles) {
961                 pv->set_subtitle (subtitles.get ());
962         }
963         Video (pv, time);
964         _last_video_time = time + one_video_frame();
965         _active_subtitles.clear_before (time);
966 }
967
968 void
969 Player::fill_audio (DCPTimePeriod period)
970 {
971         BOOST_FOREACH (DCPTimePeriod i, subtract(period, _no_audio)) {
972                 DCPTime t = i.from;
973                 while (t < i.to) {
974                         DCPTime block = min (DCPTime::from_seconds (0.5), i.to - t);
975                         Frame const samples = block.frames_round(_film->audio_frame_rate());
976                         if (samples) {
977                                 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
978                                 silence->make_silent ();
979                                 Audio (silence, t);
980                         }
981                         t += block;
982                 }
983         }
984 }
985
986 DCPTime
987 Player::one_video_frame () const
988 {
989         return DCPTime::from_frames (1, _film->video_frame_rate ());
990 }
991
992 pair<shared_ptr<AudioBuffers>, DCPTime>
993 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
994 {
995         DCPTime const discard_time = discard_to - time;
996         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
997         Frame remaining_frames = audio->frames() - discard_frames;
998         if (remaining_frames <= 0) {
999                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1000         }
1001         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1002         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1003         return make_pair(cut, time + discard_time);
1004 }