Add a OV/VF test; tidy up a bit.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_subtitles.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "compose.hpp"
51 #include <dcp/reel.h>
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <boost/foreach.hpp>
56 #include <stdint.h>
57 #include <algorithm>
58 #include <iostream>
59
60 #include "i18n.h"
61
62 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
63
64 using std::list;
65 using std::cout;
66 using std::min;
67 using std::max;
68 using std::min;
69 using std::vector;
70 using std::pair;
71 using std::map;
72 using std::make_pair;
73 using std::copy;
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79
/** Construct a Player to play a playlist in the context of a film.
 *  @param film Film from which global settings (container size, video/audio
 *  frame rates, audio processor) are taken.
 *  @param playlist Playlist whose content will be played.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _have_valid_pieces (false)
	, _ignore_video (false)
	, _ignore_audio (false)
	, _always_burn_subtitles (false)
	, _fast (false)
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
{
	/* Watch for changes to the film, the playlist and its content, all of
	   which may alter our output.
	*/
	_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
	_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
	_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
	set_video_container_size (_film->frame_size ());

	/* Run the AUDIO_PROCESSOR case of film_changed() to set up _audio_processor */
	film_changed (Film::AUDIO_PROCESSOR);

	/* Start from the beginning with an accurate seek */
	seek (DCPTime (), true);
}
100
/** (Re-)build our list of Pieces: one per playable playlist content, each
 *  pairing the content with a decoder and a FrameRateChange, and wire the
 *  decoder outputs up to our handler methods.
 */
void
Player::setup_pieces ()
{
	_pieces.clear ();

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

		if (!i->paths_valid ()) {
			/* The content's files are missing; skip it */
			continue;
		}

		shared_ptr<Decoder> decoder = decoder_factory (i, _film->log());
		FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());

		if (!decoder) {
			/* Not something that we can decode; e.g. Atmos content */
			continue;
		}

		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore ();
		}

		if (decoder->audio && _ignore_audio) {
			decoder->audio->set_ignore ();
		}

		/* If we are playing referenced content the DCP decoder must actually
		   decode it rather than skipping it.
		*/
		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp && _play_referenced) {
			dcp->set_decode_referenced ();
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		/* Connect decoder outputs to our handlers; only a weak_ptr to the
		   Piece is bound so the connection does not keep it alive.
		*/
		if (decoder->video) {
			decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		if (decoder->subtitle) {
			decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
			decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
			decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
		}
	}

	/* Record the starting state of every audio stream, keyed by stream */
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	/* Reset emission bookkeeping and mark the pieces valid */
	_last_video_time = DCPTime ();
	_last_audio_time = DCPTime ();
	_have_valid_pieces = true;
}
163
164 void
165 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
166 {
167         shared_ptr<Content> c = w.lock ();
168         if (!c) {
169                 return;
170         }
171
172         if (
173                 property == ContentProperty::POSITION ||
174                 property == ContentProperty::LENGTH ||
175                 property == ContentProperty::TRIM_START ||
176                 property == ContentProperty::TRIM_END ||
177                 property == ContentProperty::PATH ||
178                 property == VideoContentProperty::FRAME_TYPE ||
179                 property == DCPContentProperty::NEEDS_ASSETS ||
180                 property == DCPContentProperty::NEEDS_KDM ||
181                 property == SubtitleContentProperty::COLOUR ||
182                 property == SubtitleContentProperty::OUTLINE ||
183                 property == SubtitleContentProperty::SHADOW ||
184                 property == SubtitleContentProperty::EFFECT_COLOUR ||
185                 property == FFmpegContentProperty::SUBTITLE_STREAM ||
186                 property == VideoContentProperty::COLOUR_CONVERSION
187                 ) {
188
189                 _have_valid_pieces = false;
190                 Changed (frequent);
191
192         } else if (
193                 property == SubtitleContentProperty::LINE_SPACING ||
194                 property == SubtitleContentProperty::OUTLINE_WIDTH ||
195                 property == SubtitleContentProperty::Y_SCALE ||
196                 property == SubtitleContentProperty::FADE_IN ||
197                 property == SubtitleContentProperty::FADE_OUT ||
198                 property == ContentProperty::VIDEO_FRAME_RATE ||
199                 property == SubtitleContentProperty::USE ||
200                 property == SubtitleContentProperty::X_OFFSET ||
201                 property == SubtitleContentProperty::Y_OFFSET ||
202                 property == SubtitleContentProperty::X_SCALE ||
203                 property == SubtitleContentProperty::FONTS ||
204                 property == VideoContentProperty::CROP ||
205                 property == VideoContentProperty::SCALE ||
206                 property == VideoContentProperty::FADE_IN ||
207                 property == VideoContentProperty::FADE_OUT
208                 ) {
209
210                 Changed (frequent);
211         }
212 }
213
/** Set the size of the container into which video will be scaled.
 *  Re-makes our black frame at the new size and emits Changed, unless the
 *  size is unchanged in which case nothing happens.
 *  @param s New container size.
 */
void
Player::set_video_container_size (dcp::Size s)
{
	if (s == _video_container_size) {
		/* No change */
		return;
	}

	_video_container_size = s;

	/* Re-make the black frame to match the new container size */
	_black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
	_black_image->make_black ();

	Changed (false);
}
228
/** Handle a change to the playlist itself: our Pieces are now out of date. */
void
Player::playlist_changed ()
{
	_have_valid_pieces = false;
	Changed (false);
}
235
236 void
237 Player::film_changed (Film::Property p)
238 {
239         /* Here we should notice Film properties that affect our output, and
240            alert listeners that our output now would be different to how it was
241            last time we were run.
242         */
243
244         if (p == Film::CONTAINER) {
245                 Changed (false);
246         } else if (p == Film::VIDEO_FRAME_RATE) {
247                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
248                    so we need new pieces here.
249                 */
250                 _have_valid_pieces = false;
251                 Changed (false);
252         } else if (p == Film::AUDIO_PROCESSOR) {
253                 if (_film->audio_processor ()) {
254                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
255                 }
256         }
257 }
258
259 list<PositionImage>
260 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
261 {
262         list<PositionImage> all;
263
264         for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
265                 if (!i->image) {
266                         continue;
267                 }
268
269                 /* We will scale the subtitle up to fit _video_container_size */
270                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
271
272                 /* Then we need a corrective translation, consisting of two parts:
273                  *
274                  * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
275                  *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
276                  *
277                  * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
278                  *     (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
279                  *     (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
280                  *
281                  * Combining these two translations gives these expressions.
282                  */
283
284                 all.push_back (
285                         PositionImage (
286                                 i->image->scale (
287                                         scaled_size,
288                                         dcp::YUV_TO_RGB_REC601,
289                                         i->image->pixel_format (),
290                                         true,
291                                         _fast
292                                         ),
293                                 Position<int> (
294                                         lrint (_video_container_size.width * i->rectangle.x),
295                                         lrint (_video_container_size.height * i->rectangle.y)
296                                         )
297                                 )
298                         );
299         }
300
301         return all;
302 }
303
304 shared_ptr<PlayerVideo>
305 Player::black_player_video_frame () const
306 {
307         return shared_ptr<PlayerVideo> (
308                 new PlayerVideo (
309                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
310                         Crop (),
311                         optional<double> (),
312                         _video_container_size,
313                         _video_container_size,
314                         EYES_BOTH,
315                         PART_WHOLE,
316                         PresetColourConversion::all().front().conversion
317                 )
318         );
319 }
320
/** @param piece A Piece containing video.
 *  @param t A time in the DCP.
 *  @return The video frame of piece's content corresponding to t, clamped to
 *  the content's trimmed extent.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	/* Time relative to the start of the content's placement in the DCP */
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
337
338 DCPTime
339 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
340 {
341         /* See comment in dcp_to_content_video */
342         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
343         return max (DCPTime (), d + piece->content->position ());
344 }
345
346 Frame
347 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
348 {
349         DCPTime s = t - piece->content->position ();
350         s = min (piece->content->length_after_trim(), s);
351         /* See notes in dcp_to_content_video */
352         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
353 }
354
355 DCPTime
356 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
357 {
358         /* See comment in dcp_to_content_video */
359         DCPTime const d = DCPTime::from_frames (f, _film->audio_frame_rate()) - DCPTime (piece->content->trim_start(), piece->frc);
360         return max (DCPTime (), d + piece->content->position ());
361 }
362
363 ContentTime
364 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
365 {
366         DCPTime s = t - piece->content->position ();
367         s = min (piece->content->length_after_trim(), s);
368         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
369 }
370
371 DCPTime
372 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
373 {
374         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
375 }
376
377 list<shared_ptr<Font> >
378 Player::get_subtitle_fonts ()
379 {
380         if (!_have_valid_pieces) {
381                 setup_pieces ();
382         }
383
384         list<shared_ptr<Font> > fonts;
385         BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
386                 if (p->content->subtitle) {
387                         /* XXX: things may go wrong if there are duplicate font IDs
388                            with different font files.
389                         */
390                         list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
391                         copy (f.begin(), f.end(), back_inserter (fonts));
392                 }
393         }
394
395         return fonts;
396 }
397
/** Set this player never to produce any video data; setup_pieces() will tell
 *  each video decoder to ignore its output.
 */
void
Player::set_ignore_video ()
{
	_ignore_video = true;
}
404
/** Set whether or not this player should always burn text subtitles into the image,
 *  regardless of the content settings.
 *  @param burn true to always burn subtitles, false to obey content settings.
 */
void
Player::set_always_burn_subtitles (bool burn)
{
	_always_burn_subtitles = burn;
}
414
/** Put the player into 'fast' mode (e.g. used to select faster image scaling
 *  when transforming image subtitles); existing pieces are invalidated so
 *  they are re-made with the new setting.
 */
void
Player::set_fast ()
{
	_fast = true;
	_have_valid_pieces = false;
}
421
/** Make this player decode content from referenced DCPs (rather than skipping
 *  it); existing pieces are invalidated so their decoders are re-made with
 *  the new setting.
 */
void
Player::set_play_referenced ()
{
	_play_referenced = true;
	_have_valid_pieces = false;
}
428
429 list<ReferencedReelAsset>
430 Player::get_reel_assets ()
431 {
432         list<ReferencedReelAsset> a;
433
434         BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
435                 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
436                 if (!j) {
437                         continue;
438                 }
439
440                 scoped_ptr<DCPDecoder> decoder;
441                 try {
442                         decoder.reset (new DCPDecoder (j, _film->log()));
443                 } catch (...) {
444                         return a;
445                 }
446
447                 int64_t offset = 0;
448                 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
449
450                         DCPOMATIC_ASSERT (j->video_frame_rate ());
451                         double const cfr = j->video_frame_rate().get();
452                         Frame const trim_start = j->trim_start().frames_round (cfr);
453                         Frame const trim_end = j->trim_end().frames_round (cfr);
454                         int const ffr = _film->video_frame_rate ();
455
456                         DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
457                         if (j->reference_video ()) {
458                                 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
459                                 DCPOMATIC_ASSERT (ra);
460                                 ra->set_entry_point (ra->entry_point() + trim_start);
461                                 ra->set_duration (ra->duration() - trim_start - trim_end);
462                                 a.push_back (
463                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
464                                         );
465                         }
466
467                         if (j->reference_audio ()) {
468                                 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
469                                 DCPOMATIC_ASSERT (ra);
470                                 ra->set_entry_point (ra->entry_point() + trim_start);
471                                 ra->set_duration (ra->duration() - trim_start - trim_end);
472                                 a.push_back (
473                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
474                                         );
475                         }
476
477                         if (j->reference_subtitle ()) {
478                                 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
479                                 DCPOMATIC_ASSERT (ra);
480                                 ra->set_entry_point (ra->entry_point() + trim_start);
481                                 ra->set_duration (ra->duration() - trim_start - trim_end);
482                                 a.push_back (
483                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
484                                         );
485                         }
486
487                         /* Assume that main picture duration is the length of the reel */
488                         offset += k->main_picture()->duration ();
489                 }
490         }
491
492         return a;
493 }
494
/** Run one step of playback: fill any video/audio gap at the current position,
 *  otherwise pass() the decoder which is farthest behind, then emit whatever
 *  audio the merger has ready.
 *  @return true when there is nothing left to do (no filler was emitted and no
 *  decoder remains un-finished).
 */
bool
Player::pass ()
{
	if (!_have_valid_pieces) {
		setup_pieces ();
	}

	/* true if we emitted filler (black/silence) this pass, in which case we
	   do not also pass a decoder.
	*/
	bool filled = false;

	if (_last_video_time && !_playlist->video_content_at(*_last_video_time) && *_last_video_time < _playlist->length()) {
		/* _last_video_time is the time just after the last video we emitted, and there is no video content
		   at this time so we need to emit some black.
		*/
		emit_video (black_player_video_frame(), *_last_video_time);
		filled = true;
	} else if (_playlist->length() == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(), DCPTime());
		filled = true;
	}

	if (_last_audio_time && !_playlist->audio_content_at(*_last_audio_time) && *_last_audio_time < _playlist->length()) {
		/* _last_audio_time is the time just after the last audio we emitted.  There is no audio here
		   so we need to emit some silence.
		*/
		shared_ptr<Content> next = _playlist->next_audio_content(*_last_audio_time);
		/* Fill up to the next audio content (or the end of the playlist), but
		   no more than one video frame's worth at a time.
		*/
		DCPTimePeriod period (*_last_audio_time, next ? next->position() : _playlist->length());
		if (period.duration() > one_video_frame()) {
			period = DCPTimePeriod (*_last_audio_time, *_last_audio_time + one_video_frame());
		}
		fill_audio (period);
		filled = true;
	}

	/* Now pass() the decoder which is farthest behind where we are */

	shared_ptr<Piece> earliest;
	DCPTime earliest_content;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (!i->done) {
			DCPTime const t = content_time_to_dcp (i, i->decoder->position());
			if (!earliest || t < earliest_content) {
				earliest_content = t;
				earliest = i;
			}
		}
	}

	if (!filled && earliest) {
		earliest->done = earliest->decoder->pass ();
	}

	/* Emit any audio that is ready */

	/* Only pull audio up to the earliest point that any un-finished stream
	   has been pushed to, so that later audio can still be merged in.
	*/
	DCPTime pull_to = _playlist->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* There has been an accurate seek and we have received some audio before the seek time;
			   discard it.
			*/
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		}

		if (_last_audio_time) {
			/* Fill in the gap before delayed audio; this doesn't need to take into account
			   periods with no audio as it should only occur in delayed audio case.
			*/
			fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
		}

		emit_audio (i->first, i->second);
	}

	return !earliest && !filled;
}
582
583 optional<PositionImage>
584 Player::subtitles_for_frame (DCPTime time) const
585 {
586         list<PositionImage> subtitles;
587
588         BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
589
590                 /* Image subtitles */
591                 list<PositionImage> c = transform_image_subtitles (i.image);
592                 copy (c.begin(), c.end(), back_inserter (subtitles));
593
594                 /* Text subtitles (rendered to an image) */
595                 if (!i.text.empty ()) {
596                         list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
597                         copy (s.begin(), s.end(), back_inserter (subtitles));
598                 }
599         }
600
601         if (subtitles.empty ()) {
602                 return optional<PositionImage> ();
603         }
604
605         return merge (subtitles);
606 }
607
608 void
609 Player::video (weak_ptr<Piece> wp, ContentVideo video)
610 {
611         shared_ptr<Piece> piece = wp.lock ();
612         if (!piece) {
613                 return;
614         }
615
616         FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
617         if (frc.skip && (video.frame % 2) == 1) {
618                 return;
619         }
620
621         /* Time and period of the frame we will emit */
622         DCPTime const time = content_video_to_dcp (piece, video.frame);
623         DCPTimePeriod const period (time, time + one_video_frame());
624
625         /* Fill gaps that we discover now that we have some video which needs to be emitted */
626
627         if (_last_video_time) {
628                 /* XXX: this may not work for 3D */
629                 DCPTime fill_from = max (*_last_video_time, piece->content->position());
630                 for (DCPTime j = fill_from; j < time; j += one_video_frame()) {
631                         LastVideoMap::const_iterator k = _last_video.find (wp);
632                         if (k != _last_video.end ()) {
633                                 emit_video (k->second, j);
634                         } else {
635                                 emit_video (black_player_video_frame(), j);
636                         }
637                 }
638         }
639
640         /* Discard if it's outside the content's period or if it's before the last accurate seek */
641         if (
642                 time < piece->content->position() ||
643                 time >= piece->content->end() ||
644                 (_last_video_time && time < *_last_video_time)) {
645                 return;
646         }
647
648         _last_video[wp].reset (
649                 new PlayerVideo (
650                         video.image,
651                         piece->content->video->crop (),
652                         piece->content->video->fade (video.frame),
653                         piece->content->video->scale().size (
654                                 piece->content->video, _video_container_size, _film->frame_size ()
655                                 ),
656                         _video_container_size,
657                         video.eyes,
658                         video.part,
659                         piece->content->video->colour_conversion ()
660                         )
661                 );
662
663         emit_video (_last_video[wp], time);
664 }
665
666 /** Do our common processing on some audio */
667 void
668 Player::audio_transform (shared_ptr<AudioContent> content, AudioStreamPtr stream, ContentAudio content_audio, DCPTime time)
669 {
670         DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
671
672         /* Gain */
673
674         if (content->gain() != 0) {
675                 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
676                 gain->apply_gain (content->gain ());
677                 content_audio.audio = gain;
678         }
679
680         /* Remap */
681
682         content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
683
684         /* Process */
685
686         if (_audio_processor) {
687                 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
688         }
689
690         /* Push */
691
692         _audio_merger.push (content_audio.audio, time);
693         DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
694         _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
695 }
696
/** Handler for audio data emitted by a piece's decoder.  Trims the data to
 *  the content's period in the DCP, then hands it to audio_transform().
 *  @param wp Piece which produced the audio.
 *  @param stream Stream the audio came from.
 *  @param content_audio The audio data.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end()) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end()) {
		/* The block straddles the end of the content; keep only the frames
		   which fall before the end.
		*/
		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	audio_transform (content, stream, content_audio, time);
}
739
/** Handler for the start of an image subtitle from a piece's decoder.
 *  Applies the content's offset/scale settings to the subtitle's rectangle
 *  and records the subtitle as active from the corresponding DCP time.
 *  @param wp Piece which produced the subtitle.
 *  @param subtitle The image subtitle.
 */
void
Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	/* Apply content's subtitle offsets */
	subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
	subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();

	/* Apply content's subtitle scale */
	subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
	subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();

	/* Apply a corrective translation to keep the subtitle centred after that scale */
	subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
	subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);

	PlayerSubtitles ps;
	ps.image.push_back (subtitle.sub);
	/* Convert the subtitle's start time to DCP time */
	DCPTime from (content_time_to_dcp (piece, subtitle.from()));

	_active_subtitles.add_from (wp, ps, from);
}
766
767 void
768 Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
769 {
770         shared_ptr<Piece> piece = wp.lock ();
771         if (!piece) {
772                 return;
773         }
774
775         PlayerSubtitles ps;
776         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
777
778         BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
779                 s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
780                 s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
781                 float const xs = piece->content->subtitle->x_scale();
782                 float const ys = piece->content->subtitle->y_scale();
783                 float size = s.size();
784
785                 /* Adjust size to express the common part of the scaling;
786                    e.g. if xs = ys = 0.5 we scale size by 2.
787                 */
788                 if (xs > 1e-5 && ys > 1e-5) {
789                         size *= 1 / min (1 / xs, 1 / ys);
790                 }
791                 s.set_size (size);
792
793                 /* Then express aspect ratio changes */
794                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
795                         s.set_aspect_adjust (xs / ys);
796                 }
797
798                 s.set_in (dcp::Time(from.seconds(), 1000));
799                 ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
800                 ps.add_fonts (piece->content->subtitle->fonts ());
801         }
802
803         _active_subtitles.add_from (wp, ps, from);
804 }
805
806 void
807 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
808 {
809         if (!_active_subtitles.have (wp)) {
810                 return;
811         }
812
813         shared_ptr<Piece> piece = wp.lock ();
814         if (!piece) {
815                 return;
816         }
817
818         DCPTime const dcp_to = content_time_to_dcp (piece, to);
819
820         pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
821
822         if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
823                 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
824         }
825 }
826
827 void
828 Player::seek (DCPTime time, bool accurate)
829 {
830         if (_audio_processor) {
831                 _audio_processor->flush ();
832         }
833
834         _audio_merger.clear ();
835         _active_subtitles.clear ();
836
837         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
838                 if (time < i->content->position()) {
839                         /* Before; seek to 0 */
840                         i->decoder->seek (ContentTime(), accurate);
841                         i->done = false;
842                 } else if (i->content->position() <= time && time < i->content->end()) {
843                         /* During; seek to position */
844                         i->decoder->seek (dcp_to_content_time (i, time), accurate);
845                         i->done = false;
846                 } else {
847                         /* After; this piece is done */
848                         i->done = true;
849                 }
850         }
851
852         if (accurate) {
853                 _last_video_time = time;
854                 _last_audio_time = time;
855         } else {
856                 _last_video_time = optional<DCPTime>();
857                 _last_audio_time = optional<DCPTime>();
858         }
859
860         _last_video.clear ();
861 }
862
863 void
864 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
865 {
866         optional<PositionImage> subtitles = subtitles_for_frame (time);
867         if (subtitles) {
868                 pv->set_subtitle (subtitles.get ());
869         }
870
871         Video (pv, time);
872
873         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
874                 _last_video_time = time + one_video_frame();
875                 _active_subtitles.clear_before (time);
876         }
877 }
878
879 void
880 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
881 {
882         Audio (data, time);
883         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
884 }
885
886 void
887 Player::fill_audio (DCPTimePeriod period)
888 {
889         if (period.from == period.to) {
890                 return;
891         }
892
893         DCPOMATIC_ASSERT (period.from < period.to);
894
895         DCPTime t = period.from;
896         while (t < period.to) {
897                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
898                 Frame const samples = block.frames_round(_film->audio_frame_rate());
899                 if (samples) {
900                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
901                         silence->make_silent ();
902                         emit_audio (silence, t);
903                 }
904                 t += block;
905         }
906 }
907
/** @return the DCPTime duration of one video frame at the film's video frame rate */
DCPTime
Player::one_video_frame () const
{
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}
913
914 pair<shared_ptr<AudioBuffers>, DCPTime>
915 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
916 {
917         DCPTime const discard_time = discard_to - time;
918         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
919         Frame remaining_frames = audio->frames() - discard_frames;
920         if (remaining_frames <= 0) {
921                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
922         }
923         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
924         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
925         return make_pair(cut, time + discard_time);
926 }