Fix heavy fingers in previous commit.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_subtitles.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "compose.hpp"
51 #include <dcp/reel.h>
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <boost/foreach.hpp>
56 #include <stdint.h>
57 #include <algorithm>
58 #include <iostream>
59
60 #include "i18n.h"
61
62 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
63
64 using std::list;
65 using std::cout;
66 using std::min;
67 using std::max;
68 using std::min;
69 using std::vector;
70 using std::pair;
71 using std::map;
72 using std::make_pair;
73 using std::copy;
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79
/** Construct a Player for a film/playlist pair.  Pieces are not built here;
 *  they are created lazily by the first pass()/seek() that finds
 *  _have_valid_pieces false.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
        : _film (film)
        , _playlist (playlist)
        , _have_valid_pieces (false)
        , _ignore_video (false)
        , _ignore_subtitle (false)
        , _always_burn_subtitles (false)
        , _fast (false)
        , _play_referenced (false)
        , _audio_merger (_film->audio_frame_rate())
{
        /* Watch the film and playlist so that our state can be invalidated when
           anything relevant changes.
        */
        _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
        _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
        _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
        set_video_container_size (_film->frame_size ());

        /* Run the AUDIO_PROCESSOR case of film_changed() to pick up the film's
           current audio processor, if any.
        */
        film_changed (Film::AUDIO_PROCESSOR);

        /* Accurate seek to time zero so that output starts from the beginning */
        seek (DCPTime (), true);
}
100
/** Rebuild _pieces from the playlist: create a decoder for each piece of
 *  content, wire the decoders' emission signals to our handlers and reset
 *  per-pass state.  Called whenever _have_valid_pieces is false.
 */
void
Player::setup_pieces ()
{
        _pieces.clear ();

        BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

                /* Skip content whose files are missing */
                if (!i->paths_valid ()) {
                        continue;
                }

                shared_ptr<Decoder> decoder = decoder_factory (i, _film->log());
                FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());

                if (!decoder) {
                        /* Not something that we can decode; e.g. Atmos content */
                        continue;
                }

                /* Honour the ignore flags set via set_ignore_video() / set_ignore_subtitle() */
                if (decoder->video && _ignore_video) {
                        decoder->video->set_ignore ();
                }

                if (decoder->subtitle && _ignore_subtitle) {
                        decoder->subtitle->set_ignore ();
                }

                /* If we are previewing referenced (VF) material, ask DCP decoders
                   to decode it rather than skipping it.
                */
                shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
                if (dcp && _play_referenced) {
                        dcp->set_decode_referenced ();
                }

                shared_ptr<Piece> piece (new Piece (i, decoder, frc));
                _pieces.push_back (piece);

                /* Connect decoder outputs to our handlers; the piece is passed as a
                   weak_ptr so a dangling connection cannot keep it alive.
                */
                if (decoder->video) {
                        decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
                }

                if (decoder->audio) {
                        decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
                }

                if (decoder->subtitle) {
                        decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
                        decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
                        decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
                }
        }

        /* Record, for each audio stream, the piece it belongs to and where its
           pushed audio ends (initially the piece's start position).
        */
        BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
                if (i->content->audio) {
                        BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
                                _stream_states[j] = StreamState (i, i->content->position ());
                        }
                }
        }

        /* Periods with no video get black; periods with no audio get silence */
        _black = Empty (_film, bind(&Content::video, _1));
        _silent = Empty (_film, bind(&Content::audio, _1));

        _last_video_time = DCPTime ();
        _last_audio_time = DCPTime ();
        _have_valid_pieces = true;
}
166
167 void
168 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
169 {
170         shared_ptr<Content> c = w.lock ();
171         if (!c) {
172                 return;
173         }
174
175         if (
176                 property == ContentProperty::POSITION ||
177                 property == ContentProperty::LENGTH ||
178                 property == ContentProperty::TRIM_START ||
179                 property == ContentProperty::TRIM_END ||
180                 property == ContentProperty::PATH ||
181                 property == VideoContentProperty::FRAME_TYPE ||
182                 property == DCPContentProperty::NEEDS_ASSETS ||
183                 property == DCPContentProperty::NEEDS_KDM ||
184                 property == SubtitleContentProperty::COLOUR ||
185                 property == SubtitleContentProperty::OUTLINE ||
186                 property == SubtitleContentProperty::SHADOW ||
187                 property == SubtitleContentProperty::EFFECT_COLOUR ||
188                 property == FFmpegContentProperty::SUBTITLE_STREAM ||
189                 property == VideoContentProperty::COLOUR_CONVERSION
190                 ) {
191
192                 _have_valid_pieces = false;
193                 Changed (frequent);
194
195         } else if (
196                 property == SubtitleContentProperty::LINE_SPACING ||
197                 property == SubtitleContentProperty::OUTLINE_WIDTH ||
198                 property == SubtitleContentProperty::Y_SCALE ||
199                 property == SubtitleContentProperty::FADE_IN ||
200                 property == SubtitleContentProperty::FADE_OUT ||
201                 property == ContentProperty::VIDEO_FRAME_RATE ||
202                 property == SubtitleContentProperty::USE ||
203                 property == SubtitleContentProperty::X_OFFSET ||
204                 property == SubtitleContentProperty::Y_OFFSET ||
205                 property == SubtitleContentProperty::X_SCALE ||
206                 property == SubtitleContentProperty::FONTS ||
207                 property == VideoContentProperty::CROP ||
208                 property == VideoContentProperty::SCALE ||
209                 property == VideoContentProperty::FADE_IN ||
210                 property == VideoContentProperty::FADE_OUT
211                 ) {
212
213                 Changed (frequent);
214         }
215 }
216
217 void
218 Player::set_video_container_size (dcp::Size s)
219 {
220         if (s == _video_container_size) {
221                 return;
222         }
223
224         _video_container_size = s;
225
226         _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
227         _black_image->make_black ();
228
229         Changed (false);
230 }
231
/** Handler for wholesale changes to the playlist: invalidate our Pieces and
 *  tell listeners that our output has changed.
 */
void
Player::playlist_changed ()
{
        _have_valid_pieces = false;
        Changed (false);
}
238
239 void
240 Player::film_changed (Film::Property p)
241 {
242         /* Here we should notice Film properties that affect our output, and
243            alert listeners that our output now would be different to how it was
244            last time we were run.
245         */
246
247         if (p == Film::CONTAINER) {
248                 Changed (false);
249         } else if (p == Film::VIDEO_FRAME_RATE) {
250                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
251                    so we need new pieces here.
252                 */
253                 _have_valid_pieces = false;
254                 Changed (false);
255         } else if (p == Film::AUDIO_PROCESSOR) {
256                 if (_film->audio_processor ()) {
257                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
258                 }
259         }
260 }
261
262 list<PositionImage>
263 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
264 {
265         list<PositionImage> all;
266
267         for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
268                 if (!i->image) {
269                         continue;
270                 }
271
272                 /* We will scale the subtitle up to fit _video_container_size */
273                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
274
275                 /* Then we need a corrective translation, consisting of two parts:
276                  *
277                  * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
278                  *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
279                  *
280                  * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
281                  *     (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
282                  *     (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
283                  *
284                  * Combining these two translations gives these expressions.
285                  */
286
287                 all.push_back (
288                         PositionImage (
289                                 i->image->scale (
290                                         scaled_size,
291                                         dcp::YUV_TO_RGB_REC601,
292                                         i->image->pixel_format (),
293                                         true,
294                                         _fast
295                                         ),
296                                 Position<int> (
297                                         lrint (_video_container_size.width * i->rectangle.x),
298                                         lrint (_video_container_size.height * i->rectangle.y)
299                                         )
300                                 )
301                         );
302         }
303
304         return all;
305 }
306
307 shared_ptr<PlayerVideo>
308 Player::black_player_video_frame () const
309 {
310         return shared_ptr<PlayerVideo> (
311                 new PlayerVideo (
312                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
313                         Crop (),
314                         optional<double> (),
315                         _video_container_size,
316                         _video_container_size,
317                         EYES_BOTH,
318                         PART_WHOLE,
319                         PresetColourConversion::all().front().conversion
320                 )
321         );
322 }
323
/** Convert a DCP time to a frame index in a piece's video content,
 *  accounting for the piece's position, trim and frame rate change.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
        DCPTime s = t - piece->content->position ();
        /* Clamp to the end of the content... */
        s = min (piece->content->length_after_trim(), s);
        /* ...and to its start, after accounting for the start trim */
        s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

        /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
           then convert that ContentTime to frames at the content's rate.  However this fails for
           situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
           enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

           Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
        */
        return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
340
/** Convert a frame index in a piece's video content to a DCP time;
 *  the inverse of dcp_to_content_video().
 */
DCPTime
Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
        /* See comment in dcp_to_content_video */
        DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
        return max (DCPTime (), d + piece->content->position ());
}
348
/** Convert a DCP time to a frame index in a piece's audio content,
 *  expressed at the film's (resampled) audio frame rate.
 */
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
        DCPTime s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(), s);
        /* See notes in dcp_to_content_video */
        return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
}
357
/** Convert a frame index in a piece's resampled audio to a DCP time;
 *  the inverse of dcp_to_resampled_audio().
 */
DCPTime
Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
        /* See comment in dcp_to_content_video */
        return DCPTime::from_frames (f, _film->audio_frame_rate())
                - DCPTime (piece->content->trim_start(), piece->frc)
                + piece->content->position();
}
366
/** Convert a DCP time to a ContentTime within a piece, clamping to the
 *  content's trimmed length and accounting for its start trim.
 */
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
        DCPTime s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(), s);
        return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
374
/** Convert a ContentTime within a piece to a DCP time; the inverse of
 *  dcp_to_content_time().
 */
DCPTime
Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
{
        return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
}
380
381 list<shared_ptr<Font> >
382 Player::get_subtitle_fonts ()
383 {
384         if (!_have_valid_pieces) {
385                 setup_pieces ();
386         }
387
388         list<shared_ptr<Font> > fonts;
389         BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
390                 if (p->content->subtitle) {
391                         /* XXX: things may go wrong if there are duplicate font IDs
392                            with different font files.
393                         */
394                         list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
395                         copy (f.begin(), f.end(), back_inserter (fonts));
396                 }
397         }
398
399         return fonts;
400 }
401
/** Set this player never to produce any video data.  Takes effect when
 *  pieces are next set up.
 */
void
Player::set_ignore_video ()
{
        _ignore_video = true;
}
408
/** Set this player never to produce any subtitle data.  Takes effect when
 *  pieces are next set up.
 */
void
Player::set_ignore_subtitle ()
{
        _ignore_subtitle = true;
}
414
/** Set whether or not this player should always burn text subtitles into the image,
 *  regardless of the content settings.
 *  @param burn true to always burn subtitles, false to obey content settings.
 */
void
Player::set_always_burn_subtitles (bool burn)
{
        _always_burn_subtitles = burn;
}
424
/** Put this player into "fast" mode; the flag is passed on to image scaling
 *  (see transform_image_subtitles()).  Invalidates the current pieces.
 */
void
Player::set_fast ()
{
        _fast = true;
        _have_valid_pieces = false;
}
431
/** Ask this player to decode and play material which would otherwise be
 *  referenced from an existing DCP (see DCPDecoder::set_decode_referenced()).
 *  Invalidates the current pieces.
 */
void
Player::set_play_referenced ()
{
        _play_referenced = true;
        _have_valid_pieces = false;
}
438
439 list<ReferencedReelAsset>
440 Player::get_reel_assets ()
441 {
442         list<ReferencedReelAsset> a;
443
444         BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
445                 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
446                 if (!j) {
447                         continue;
448                 }
449
450                 scoped_ptr<DCPDecoder> decoder;
451                 try {
452                         decoder.reset (new DCPDecoder (j, _film->log()));
453                 } catch (...) {
454                         return a;
455                 }
456
457                 int64_t offset = 0;
458                 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
459
460                         DCPOMATIC_ASSERT (j->video_frame_rate ());
461                         double const cfr = j->video_frame_rate().get();
462                         Frame const trim_start = j->trim_start().frames_round (cfr);
463                         Frame const trim_end = j->trim_end().frames_round (cfr);
464                         int const ffr = _film->video_frame_rate ();
465
466                         DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
467                         if (j->reference_video ()) {
468                                 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
469                                 DCPOMATIC_ASSERT (ra);
470                                 ra->set_entry_point (ra->entry_point() + trim_start);
471                                 ra->set_duration (ra->duration() - trim_start - trim_end);
472                                 a.push_back (
473                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
474                                         );
475                         }
476
477                         if (j->reference_audio ()) {
478                                 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
479                                 DCPOMATIC_ASSERT (ra);
480                                 ra->set_entry_point (ra->entry_point() + trim_start);
481                                 ra->set_duration (ra->duration() - trim_start - trim_end);
482                                 a.push_back (
483                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
484                                         );
485                         }
486
487                         if (j->reference_subtitle ()) {
488                                 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
489                                 DCPOMATIC_ASSERT (ra);
490                                 ra->set_entry_point (ra->entry_point() + trim_start);
491                                 ra->set_duration (ra->duration() - trim_start - trim_end);
492                                 a.push_back (
493                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
494                                         );
495                         }
496
497                         /* Assume that main picture duration is the length of the reel */
498                         offset += k->main_picture()->duration ();
499                 }
500         }
501
502         return a;
503 }
504
/** Run the decoder or gap-filler which is farthest behind, then emit any
 *  audio which has become ready.
 *  @return true when there is nothing left to do (the player has finished).
 */
bool
Player::pass ()
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        if (_playlist->length() == DCPTime()) {
                /* Special case of an empty Film; just give one black frame */
                emit_video (black_player_video_frame(), DCPTime());
                return true;
        }

        /* Find the decoder or empty which is farthest behind where we are and make it emit some data */

        shared_ptr<Piece> earliest_content;
        optional<DCPTime> earliest_time;

        BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
                if (!i->done) {
                        DCPTime const t = content_time_to_dcp (i, i->decoder->position());
                        /* Given two choices at the same time, pick the one with a subtitle so we see it before
                           the video.
                        */
                        if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->subtitle)) {
                                earliest_time = t;
                                earliest_content = i;
                        }
                }
        }

        bool done = false;

        /* What kind of emission this pass will make */
        enum {
                NONE,
                CONTENT,
                BLACK,
                SILENT
        } which = NONE;

        if (earliest_content) {
                which = CONTENT;
        }

        /* A pending black (no-video) period beats content if it is earlier */
        if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
                earliest_time = _black.position ();
                which = BLACK;
        }

        /* Likewise a pending silent (no-audio) period */
        if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
                earliest_time = _silent.position ();
                which = SILENT;
        }

        switch (which) {
        case CONTENT:
                earliest_content->done = earliest_content->decoder->pass ();
                break;
        case BLACK:
                emit_video (black_player_video_frame(), _black.position());
                _black.set_position (_black.position() + one_video_frame());
                break;
        case SILENT:
        {
                /* Fill at most one video frame's worth of silence per pass */
                DCPTimePeriod period (_silent.period_at_position());
                if (period.duration() > one_video_frame()) {
                        period.to = period.from + one_video_frame();
                }
                fill_audio (period);
                _silent.set_position (period.to);
                break;
        }
        case NONE:
                done = true;
                break;
        }

        /* Emit any audio that is ready */

        /* We can only pull audio up to the point before which every live stream
           has pushed; beyond that more audio may still arrive and be mixed in.
        */
        DCPTime pull_to = _film->length ();
        for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
                if (!i->second.piece->done && i->second.last_push_end < pull_to) {
                        pull_to = i->second.last_push_end;
                }
        }

        list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
        for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
                if (_last_audio_time && i->second < *_last_audio_time) {
                        /* There has been an accurate seek and we have received some audio before the seek time;
                           discard it.
                        */
                        pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
                        if (!cut.first) {
                                continue;
                        }
                        *i = cut;
                }

                emit_audio (i->first, i->second);
        }

        return done;
}
609
610 optional<PositionImage>
611 Player::subtitles_for_frame (DCPTime time) const
612 {
613         list<PositionImage> subtitles;
614
615         BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
616
617                 /* Image subtitles */
618                 list<PositionImage> c = transform_image_subtitles (i.image);
619                 copy (c.begin(), c.end(), back_inserter (subtitles));
620
621                 /* Text subtitles (rendered to an image) */
622                 if (!i.text.empty ()) {
623                         list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
624                         copy (s.begin(), s.end(), back_inserter (subtitles));
625                 }
626         }
627
628         if (subtitles.empty ()) {
629                 return optional<PositionImage> ();
630         }
631
632         return merge (subtitles);
633 }
634
635 void
636 Player::video (weak_ptr<Piece> wp, ContentVideo video)
637 {
638         shared_ptr<Piece> piece = wp.lock ();
639         if (!piece) {
640                 return;
641         }
642
643         FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
644         if (frc.skip && (video.frame % 2) == 1) {
645                 return;
646         }
647
648         /* Time and period of the frame we will emit */
649         DCPTime const time = content_video_to_dcp (piece, video.frame);
650         DCPTimePeriod const period (time, time + one_video_frame());
651
652         /* Discard if it's outside the content's period or if it's before the last accurate seek */
653         if (
654                 time < piece->content->position() ||
655                 time >= piece->content->end() ||
656                 (_last_video_time && time < *_last_video_time)) {
657                 return;
658         }
659
660         /* Fill gaps that we discover now that we have some video which needs to be emitted */
661
662         if (_last_video_time) {
663                 /* XXX: this may not work for 3D */
664                 DCPTime fill_from = max (*_last_video_time, piece->content->position());
665                 for (DCPTime j = fill_from; j < time; j += one_video_frame()) {
666                         LastVideoMap::const_iterator k = _last_video.find (wp);
667                         if (k != _last_video.end ()) {
668                                 emit_video (k->second, j);
669                         } else {
670                                 emit_video (black_player_video_frame(), j);
671                         }
672                 }
673         }
674
675         _last_video[wp].reset (
676                 new PlayerVideo (
677                         video.image,
678                         piece->content->video->crop (),
679                         piece->content->video->fade (video.frame),
680                         piece->content->video->scale().size (
681                                 piece->content->video, _video_container_size, _film->frame_size ()
682                                 ),
683                         _video_container_size,
684                         video.eyes,
685                         video.part,
686                         piece->content->video->colour_conversion ()
687                         )
688                 );
689
690         emit_video (_last_video[wp], time);
691 }
692
/** Handler for audio data emitted by a piece's decoder.  Positions the block
 *  in DCP time, trims anything outside the content's period, applies gain,
 *  remapping and any audio processor, then pushes the result to the merger.
 *  @param wp Piece that the data came from.
 *  @param stream Stream within the piece that the data belongs to.
 *  @param content_audio The audio data; must be non-empty.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
        DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

        shared_ptr<Piece> piece = wp.lock ();
        if (!piece) {
                return;
        }

        shared_ptr<AudioContent> content = piece->content->audio;
        DCPOMATIC_ASSERT (content);

        /* Compute time in the DCP */
        DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
        /* And the end of this block in the DCP */
        DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());

        /* Remove anything that comes before the start or after the end of the content */
        if (time < piece->content->position()) {
                pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
                if (!cut.first) {
                        /* This audio is entirely discarded */
                        return;
                }
                content_audio.audio = cut.first;
                time = cut.second;
        } else if (time > piece->content->end()) {
                /* Discard it all */
                return;
        } else if (end > piece->content->end()) {
                /* The block overlaps the content's end; keep only the part inside */
                Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
                if (remaining_frames == 0) {
                        return;
                }
                shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
                cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
                content_audio.audio = cut;
        }

        DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

        /* Gain */

        if (content->gain() != 0) {
                /* Copy so that we do not modify the decoder's buffers */
                shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
                gain->apply_gain (content->gain ());
                content_audio.audio = gain;
        }

        /* Remap */

        content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

        /* Process */

        if (_audio_processor) {
                content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
        }

        /* Push */

        _audio_merger.push (content_audio.audio, time);
        DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
        /* Record how far this stream has got, so pass() knows how much merged
           audio is safe to pull.
        */
        _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
759
/** Handler for the start of an image subtitle emitted by a piece's decoder.
 *  Applies the content's offset/scale settings and records the subtitle as
 *  active from the corresponding DCP time.
 */
void
Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
{
        shared_ptr<Piece> piece = wp.lock ();
        if (!piece) {
                return;
        }

        /* Apply content's subtitle offsets */
        subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
        subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();

        /* Apply content's subtitle scale */
        subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
        subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();

        /* Apply a corrective translation to keep the subtitle centred after that scale */
        /* NOTE(review): a centre-preserving correction would normally be
           (pre-scale size) * (scale - 1) / 2; this uses the post-scale size and
           no factor of two — confirm this is intentional.
        */
        subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
        subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);

        PlayerSubtitles ps;
        ps.image.push_back (subtitle.sub);
        DCPTime from (content_time_to_dcp (piece, subtitle.from()));

        _active_subtitles.add_from (wp, ps, from);
}
786
787 void
788 Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
789 {
790         shared_ptr<Piece> piece = wp.lock ();
791         if (!piece) {
792                 return;
793         }
794
795         PlayerSubtitles ps;
796         DCPTime const from (content_time_to_dcp (piece, subtitle.from()));
797
798         BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
799                 s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
800                 s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
801                 float const xs = piece->content->subtitle->x_scale();
802                 float const ys = piece->content->subtitle->y_scale();
803                 float size = s.size();
804
805                 /* Adjust size to express the common part of the scaling;
806                    e.g. if xs = ys = 0.5 we scale size by 2.
807                 */
808                 if (xs > 1e-5 && ys > 1e-5) {
809                         size *= 1 / min (1 / xs, 1 / ys);
810                 }
811                 s.set_size (size);
812
813                 /* Then express aspect ratio changes */
814                 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
815                         s.set_aspect_adjust (xs / ys);
816                 }
817
818                 s.set_in (dcp::Time(from.seconds(), 1000));
819                 ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
820                 ps.add_fonts (piece->content->subtitle->fonts ());
821         }
822
823         _active_subtitles.add_from (wp, ps, from);
824 }
825
826 void
827 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
828 {
829         if (!_active_subtitles.have (wp)) {
830                 return;
831         }
832
833         shared_ptr<Piece> piece = wp.lock ();
834         if (!piece) {
835                 return;
836         }
837
838         DCPTime const dcp_to = content_time_to_dcp (piece, to);
839
840         pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
841
842         if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
843                 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
844         }
845 }
846
847 void
848 Player::seek (DCPTime time, bool accurate)
849 {
850         if (_audio_processor) {
851                 _audio_processor->flush ();
852         }
853
854         _audio_merger.clear ();
855         _active_subtitles.clear ();
856
857         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
858                 if (time < i->content->position()) {
859                         /* Before; seek to 0 */
860                         i->decoder->seek (ContentTime(), accurate);
861                         i->done = false;
862                 } else if (i->content->position() <= time && time < i->content->end()) {
863                         /* During; seek to position */
864                         i->decoder->seek (dcp_to_content_time (i, time), accurate);
865                         i->done = false;
866                 } else {
867                         /* After; this piece is done */
868                         i->done = true;
869                 }
870         }
871
872         if (accurate) {
873                 _last_video_time = time;
874                 _last_audio_time = time;
875         } else {
876                 _last_video_time = optional<DCPTime>();
877                 _last_audio_time = optional<DCPTime>();
878         }
879
880         _black.set_position (time);
881         _silent.set_position (time);
882
883         _last_video.clear ();
884 }
885
886 void
887 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
888 {
889         optional<PositionImage> subtitles = subtitles_for_frame (time);
890         if (subtitles) {
891                 pv->set_subtitle (subtitles.get ());
892         }
893
894         Video (pv, time);
895
896         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
897                 _last_video_time = time + one_video_frame();
898                 _active_subtitles.clear_before (time);
899         }
900 }
901
902 void
903 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
904 {
905         Audio (data, time);
906         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
907 }
908
909 void
910 Player::fill_audio (DCPTimePeriod period)
911 {
912         if (period.from == period.to) {
913                 return;
914         }
915
916         DCPOMATIC_ASSERT (period.from < period.to);
917
918         DCPTime t = period.from;
919         while (t < period.to) {
920                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
921                 Frame const samples = block.frames_round(_film->audio_frame_rate());
922                 if (samples) {
923                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
924                         silence->make_silent ();
925                         emit_audio (silence, t);
926                 }
927                 t += block;
928         }
929 }
930
/** @return the duration of a single video frame at the film's video frame rate */
DCPTime
Player::one_video_frame () const
{
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}
936
937 pair<shared_ptr<AudioBuffers>, DCPTime>
938 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
939 {
940         DCPTime const discard_time = discard_to - time;
941         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
942         Frame remaining_frames = audio->frames() - discard_frames;
943         if (remaining_frames <= 0) {
944                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
945         }
946         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
947         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
948         return make_pair(cut, time + discard_time);
949 }