Fix SNAFU with silence/black.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_subtitles.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "compose.hpp"
51 #include <dcp/reel.h>
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <boost/foreach.hpp>
56 #include <stdint.h>
57 #include <algorithm>
58 #include <iostream>
59
60 #include "i18n.h"
61
62 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
63
64 using std::list;
65 using std::cout;
66 using std::min;
67 using std::max;
68 using std::min;
69 using std::vector;
70 using std::pair;
71 using std::map;
72 using std::make_pair;
73 using std::copy;
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79
/** Construct a Player to play @a playlist in the context of @a film.
 *  Connects to change signals on the film and playlist so that listeners can
 *  be told when our output would differ, then does an initial seek to zero.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
	: _film (film)
	, _playlist (playlist)
	, _have_valid_pieces (false)
	, _ignore_video (false)
	, _ignore_subtitle (false)
	, _always_burn_subtitles (false)
	, _fast (false)
	, _play_referenced (false)
	, _audio_merger (_film->audio_frame_rate())
{
	/* Keep the connections so they are dropped automatically when we are destroyed */
	_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
	_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
	_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
	set_video_container_size (_film->frame_size ());

	/* Set up our audio processor (if the film has one) via the normal change path */
	film_changed (Film::AUDIO_PROCESSOR);

	/* Initial seek to the start; second argument presumably means "accurate" — matches Player::seek elsewhere */
	seek (DCPTime (), true);
}
100
/** Rebuild _pieces from the playlist: make a decoder for each piece of content,
 *  wire its output signals to our handlers, and reset playback state.
 *  Content whose files are missing, or which we cannot decode, is skipped.
 */
void
Player::setup_pieces ()
{
	_pieces.clear ();

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

		if (!i->paths_valid ()) {
			continue;
		}

		shared_ptr<Decoder> decoder = decoder_factory (i, _film->log());
		FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());

		if (!decoder) {
			/* Not something that we can decode; e.g. Atmos content */
			continue;
		}

		/* Honour the ignore flags set by set_ignore_video() / set_ignore_subtitle() */
		if (decoder->video && _ignore_video) {
			decoder->video->set_ignore ();
		}

		if (decoder->subtitle && _ignore_subtitle) {
			decoder->subtitle->set_ignore ();
		}

		/* If we are playing referenced DCP content ourselves, ask the DCP decoder to decode it */
		shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
		if (dcp && _play_referenced) {
			dcp->set_decode_referenced ();
		}

		shared_ptr<Piece> piece (new Piece (i, decoder, frc));
		_pieces.push_back (piece);

		/* Connect the decoder's emissions to our handlers; weak_ptr so a dead
		   Piece cannot be kept alive by a pending signal.
		*/
		if (decoder->video) {
			decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
		}

		if (decoder->audio) {
			decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
		}

		if (decoder->subtitle) {
			decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
			decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
			decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
		}
	}

	/* Record, per audio stream, which piece it belongs to and where its pushed audio ends */
	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (i->content->audio) {
			BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
				_stream_states[j] = StreamState (i, i->content->position ());
			}
		}
	}

	_last_video_time = DCPTime ();
	_last_audio_time = DCPTime ();
	_have_valid_pieces = true;
}
163
164 void
165 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
166 {
167         shared_ptr<Content> c = w.lock ();
168         if (!c) {
169                 return;
170         }
171
172         if (
173                 property == ContentProperty::POSITION ||
174                 property == ContentProperty::LENGTH ||
175                 property == ContentProperty::TRIM_START ||
176                 property == ContentProperty::TRIM_END ||
177                 property == ContentProperty::PATH ||
178                 property == VideoContentProperty::FRAME_TYPE ||
179                 property == DCPContentProperty::NEEDS_ASSETS ||
180                 property == DCPContentProperty::NEEDS_KDM ||
181                 property == SubtitleContentProperty::COLOUR ||
182                 property == SubtitleContentProperty::OUTLINE ||
183                 property == SubtitleContentProperty::SHADOW ||
184                 property == SubtitleContentProperty::EFFECT_COLOUR ||
185                 property == FFmpegContentProperty::SUBTITLE_STREAM ||
186                 property == VideoContentProperty::COLOUR_CONVERSION
187                 ) {
188
189                 _have_valid_pieces = false;
190                 Changed (frequent);
191
192         } else if (
193                 property == SubtitleContentProperty::LINE_SPACING ||
194                 property == SubtitleContentProperty::OUTLINE_WIDTH ||
195                 property == SubtitleContentProperty::Y_SCALE ||
196                 property == SubtitleContentProperty::FADE_IN ||
197                 property == SubtitleContentProperty::FADE_OUT ||
198                 property == ContentProperty::VIDEO_FRAME_RATE ||
199                 property == SubtitleContentProperty::USE ||
200                 property == SubtitleContentProperty::X_OFFSET ||
201                 property == SubtitleContentProperty::Y_OFFSET ||
202                 property == SubtitleContentProperty::X_SCALE ||
203                 property == SubtitleContentProperty::FONTS ||
204                 property == VideoContentProperty::CROP ||
205                 property == VideoContentProperty::SCALE ||
206                 property == VideoContentProperty::FADE_IN ||
207                 property == VideoContentProperty::FADE_OUT
208                 ) {
209
210                 Changed (frequent);
211         }
212 }
213
214 void
215 Player::set_video_container_size (dcp::Size s)
216 {
217         if (s == _video_container_size) {
218                 return;
219         }
220
221         _video_container_size = s;
222
223         _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
224         _black_image->make_black ();
225
226         Changed (false);
227 }
228
/** Handle a wholesale change to the playlist: our Pieces must be rebuilt
 *  and listeners told that our output would now be different.
 */
void
Player::playlist_changed ()
{
	_have_valid_pieces = false;
	Changed (false);
}
235
236 void
237 Player::film_changed (Film::Property p)
238 {
239         /* Here we should notice Film properties that affect our output, and
240            alert listeners that our output now would be different to how it was
241            last time we were run.
242         */
243
244         if (p == Film::CONTAINER) {
245                 Changed (false);
246         } else if (p == Film::VIDEO_FRAME_RATE) {
247                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
248                    so we need new pieces here.
249                 */
250                 _have_valid_pieces = false;
251                 Changed (false);
252         } else if (p == Film::AUDIO_PROCESSOR) {
253                 if (_film->audio_processor ()) {
254                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
255                 }
256         }
257 }
258
259 list<PositionImage>
260 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
261 {
262         list<PositionImage> all;
263
264         for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
265                 if (!i->image) {
266                         continue;
267                 }
268
269                 /* We will scale the subtitle up to fit _video_container_size */
270                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
271
272                 /* Then we need a corrective translation, consisting of two parts:
273                  *
274                  * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
275                  *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
276                  *
277                  * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
278                  *     (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
279                  *     (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
280                  *
281                  * Combining these two translations gives these expressions.
282                  */
283
284                 all.push_back (
285                         PositionImage (
286                                 i->image->scale (
287                                         scaled_size,
288                                         dcp::YUV_TO_RGB_REC601,
289                                         i->image->pixel_format (),
290                                         true,
291                                         _fast
292                                         ),
293                                 Position<int> (
294                                         lrint (_video_container_size.width * i->rectangle.x),
295                                         lrint (_video_container_size.height * i->rectangle.y)
296                                         )
297                                 )
298                         );
299         }
300
301         return all;
302 }
303
/** @return A PlayerVideo wrapping our cached black frame, filling the whole
 *  container with no crop, both eyes, and a default colour conversion.
 */
shared_ptr<PlayerVideo>
Player::black_player_video_frame () const
{
	return shared_ptr<PlayerVideo> (
		new PlayerVideo (
			shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
			Crop (),
			optional<double> (),
			_video_container_size,
			_video_container_size,
			EYES_BOTH,
			PART_WHOLE,
			PresetColourConversion::all().front().conversion
		)
	);
}
320
/** Convert a DCP time to a frame index within some piece of video content.
 *  @param piece Piece the time refers to.
 *  @param t Time in the DCP.
 *  @return Frame index in the content's own video frames.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

	/* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
	   then convert that ContentTime to frames at the content's rate.  However this fails for
	   situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
	   enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

	   Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
	*/
	return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
337
/** Convert a frame index within some piece of video content to a DCP time.
 *  Inverse of dcp_to_content_video, clamped so it never goes negative.
 *  @param piece Piece the frame belongs to.
 *  @param f Frame index in the content's own video frames.
 */
DCPTime
Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
	/* See comment in dcp_to_content_video */
	DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
	return max (DCPTime (), d + piece->content->position ());
}
345
/** Convert a DCP time to a frame index in some content's audio, after it has
 *  been resampled to the film's audio rate.
 *  @param piece Piece the time refers to.
 *  @param t Time in the DCP.
 */
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	/* See notes in dcp_to_content_video */
	return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
}
354
/** Convert a frame index in some content's resampled audio to a DCP time.
 *  Inverse of dcp_to_resampled_audio (without the clamping).
 *  @param piece Piece the frame belongs to.
 *  @param f Frame index at the film's audio rate.
 */
DCPTime
Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
	/* See comment in dcp_to_content_video */
	return DCPTime::from_frames (f, _film->audio_frame_rate())
		- DCPTime (piece->content->trim_start(), piece->frc)
		+ piece->content->position();
}
363
/** Convert a DCP time to a time within some piece of content, clamped to be
 *  non-negative.
 *  @param piece Piece the time refers to.
 *  @param t Time in the DCP.
 */
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
	DCPTime s = t - piece->content->position ();
	s = min (piece->content->length_after_trim(), s);
	return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
371
/** Convert a time within some piece of content to a DCP time, clamped to be
 *  non-negative.  Inverse of dcp_to_content_time.
 *  @param piece Piece the time refers to.
 *  @param t Time in the content.
 */
DCPTime
Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
{
	return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
}
377
378 list<shared_ptr<Font> >
379 Player::get_subtitle_fonts ()
380 {
381         if (!_have_valid_pieces) {
382                 setup_pieces ();
383         }
384
385         list<shared_ptr<Font> > fonts;
386         BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
387                 if (p->content->subtitle) {
388                         /* XXX: things may go wrong if there are duplicate font IDs
389                            with different font files.
390                         */
391                         list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
392                         copy (f.begin(), f.end(), back_inserter (fonts));
393                 }
394         }
395
396         return fonts;
397 }
398
/** Set this player never to produce any video data.
 *  Takes effect when pieces are next set up (see setup_pieces).
 */
void
Player::set_ignore_video ()
{
	_ignore_video = true;
}
405
/** Set this player never to produce any subtitle data.
 *  Takes effect when pieces are next set up (see setup_pieces).
 */
void
Player::set_ignore_subtitle ()
{
	_ignore_subtitle = true;
}
411
/** Set whether or not this player should always burn text subtitles into the image,
 *  regardless of the content settings.
 *  @param burn true to always burn subtitles, false to obey content settings.
 */
void
Player::set_always_burn_subtitles (bool burn)
{
	_always_burn_subtitles = burn;
}
421
/** Set the player to run in "fast" mode (e.g. lower-quality image scaling;
 *  see transform_image_subtitles).  Invalidates existing pieces.
 */
void
Player::set_fast ()
{
	_fast = true;
	_have_valid_pieces = false;
}
428
/** Set the player to decode and play content which would otherwise be
 *  referenced directly from an existing DCP.  Invalidates existing pieces.
 */
void
Player::set_play_referenced ()
{
	_play_referenced = true;
	_have_valid_pieces = false;
}
435
/** @return Details of the reel assets (picture/sound/subtitle) from DCP
 *  content in the playlist which are to be referenced directly, rather than
 *  re-encoded, each with the DCP period that it should occupy.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
	list<ReferencedReelAsset> a;

	BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
		/* Only DCP content can be referenced */
		shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
		if (!j) {
			continue;
		}

		scoped_ptr<DCPDecoder> decoder;
		try {
			decoder.reset (new DCPDecoder (j, _film->log()));
		} catch (...) {
			/* NOTE(review): any failure to open the DCP abandons the scan and
			   returns whatever was collected so far — presumably deliberate
			   best-effort behaviour; confirm against callers.
			*/
			return a;
		}

		/* Offset, in frames, of the current reel from the start of this content */
		int64_t offset = 0;
		BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

			DCPOMATIC_ASSERT (j->video_frame_rate ());
			double const cfr = j->video_frame_rate().get();
			Frame const trim_start = j->trim_start().frames_round (cfr);
			Frame const trim_end = j->trim_end().frames_round (cfr);
			int const ffr = _film->video_frame_rate ();

			/* Where this reel starts in the DCP */
			DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
			if (j->reference_video ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
				DCPOMATIC_ASSERT (ra);
				/* Apply the content's trims to the asset's entry point and duration */
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_audio ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			if (j->reference_subtitle ()) {
				shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
				DCPOMATIC_ASSERT (ra);
				ra->set_entry_point (ra->entry_point() + trim_start);
				ra->set_duration (ra->duration() - trim_start - trim_end);
				a.push_back (
					ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
					);
			}

			/* Assume that main picture duration is the length of the reel */
			offset += k->main_picture()->duration ();
		}
	}

	return a;
}
501
/** Run one "pass" of the player: make whichever source (content decoder,
 *  black filler or silence filler) is farthest behind emit some data, then
 *  emit any audio which has become ready as a result.
 *  @return true when there is nothing left to do (playback has finished).
 */
bool
Player::pass ()
{
	if (!_have_valid_pieces) {
		setup_pieces ();
	}

	if (_playlist->length() == DCPTime()) {
		/* Special case of an empty Film; just give one black frame */
		emit_video (black_player_video_frame(), DCPTime());
		return true;
	}

	/* Find the decoder or empty which is farthest behind where we are and make it emit some data */

	shared_ptr<Piece> earliest_content;
	optional<DCPTime> earliest_time;

	BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
		if (!i->done) {
			DCPTime const t = content_time_to_dcp (i, i->decoder->position());
			/* Given two choices at the same time, pick the one with a subtitle so we see it before
			   the video.
			*/
			if (!earliest_time || t < *earliest_time || (t == *earliest_time && i->decoder->subtitle)) {
				earliest_time = t;
				earliest_content = i;
			}
		}
	}

	bool done = false;

	/* What we should emit next: some real content, a black video frame,
	   some silence, or nothing at all (we are finished).
	*/
	enum {
		NONE,
		CONTENT,
		BLACK,
		SILENT
	} which = NONE;

	if (earliest_content) {
		which = CONTENT;
	}

	/* Black / silence fillers win if they are further behind than the earliest content */
	if (!_black.done() && (!earliest_time || _black.position() < *earliest_time)) {
		earliest_time = _black.position ();
		which = BLACK;
	}

	if (!_silent.done() && (!earliest_time || _silent.position() < *earliest_time)) {
		earliest_time = _silent.position ();
		which = SILENT;
	}

	switch (which) {
	case CONTENT:
		earliest_content->done = earliest_content->decoder->pass ();
		break;
	case BLACK:
		emit_video (black_player_video_frame(), _black.position());
		_black.set_position (_black.position() + one_video_frame());
		break;
	case SILENT:
	{
		/* Fill at most one video frame's worth of silence per pass */
		DCPTimePeriod period (_silent.period_at_position());
		if (period.duration() > one_video_frame()) {
			period.to = period.from + one_video_frame();
		}
		fill_audio (period);
		_silent.set_position (period.to);
		break;
	}
	case NONE:
		done = true;
		break;
	}

	/* Emit any audio that is ready */

	/* We can only pull audio up to the earliest point that any live stream
	   has pushed to; beyond that, later pushes could still mix in.
	*/
	DCPTime pull_to = _film->length ();
	for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
		if (!i->second.piece->done && i->second.last_push_end < pull_to) {
			pull_to = i->second.last_push_end;
		}
	}

	list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
	for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
		if (_last_audio_time && i->second < *_last_audio_time) {
			/* There has been an accurate seek and we have received some audio before the seek time;
			   discard it.
			*/
			pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
			if (!cut.first) {
				continue;
			}
			*i = cut;
		}

		emit_audio (i->first, i->second);
	}

	return done;
}
606
607 optional<PositionImage>
608 Player::subtitles_for_frame (DCPTime time) const
609 {
610         list<PositionImage> subtitles;
611
612         BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
613
614                 /* Image subtitles */
615                 list<PositionImage> c = transform_image_subtitles (i.image);
616                 copy (c.begin(), c.end(), back_inserter (subtitles));
617
618                 /* Text subtitles (rendered to an image) */
619                 if (!i.text.empty ()) {
620                         list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
621                         copy (s.begin(), s.end(), back_inserter (subtitles));
622                 }
623         }
624
625         if (subtitles.empty ()) {
626                 return optional<PositionImage> ();
627         }
628
629         return merge (subtitles);
630 }
631
/** Handle a frame of video emitted by some piece's decoder: drop it if it is
 *  outside the content's period or before the last seek, fill any gap since
 *  the last emitted frame (with repeats of that piece's last frame, or
 *  black), then wrap it in a PlayerVideo and emit it.
 *  @param wp Piece which emitted this video.
 *  @param video The video frame.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	/* When the content rate is higher than the DCP rate we skip every other frame */
	FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
	if (frc.skip && (video.frame % 2) == 1) {
		return;
	}

	/* Time and period of the frame we will emit */
	DCPTime const time = content_video_to_dcp (piece, video.frame);
	DCPTimePeriod const period (time, time + one_video_frame());

	/* Discard if it's outside the content's period or if it's before the last accurate seek */
	if (
		time < piece->content->position() ||
		time >= piece->content->end() ||
		(_last_video_time && time < *_last_video_time)) {
		return;
	}

	/* Fill gaps that we discover now that we have some video which needs to be emitted */

	if (_last_video_time) {
		/* XXX: this may not work for 3D */
		DCPTime fill_from = max (*_last_video_time, piece->content->position());
		for (DCPTime j = fill_from; j < time; j += one_video_frame()) {
			/* Repeat this piece's last frame if we have one, otherwise use black */
			LastVideoMap::const_iterator k = _last_video.find (wp);
			if (k != _last_video.end ()) {
				emit_video (k->second, j);
			} else {
				emit_video (black_player_video_frame(), j);
			}
		}
	}

	/* Remember this frame so we can repeat it to fill any future gap */
	_last_video[wp].reset (
		new PlayerVideo (
			video.image,
			piece->content->video->crop (),
			piece->content->video->fade (video.frame),
			piece->content->video->scale().size (
				piece->content->video, _video_container_size, _film->frame_size ()
				),
			_video_container_size,
			video.eyes,
			video.part,
			piece->content->video->colour_conversion ()
			)
		);

	emit_video (_last_video[wp], time);
}
689
/** Handle some audio emitted by some piece's decoder: trim it to the
 *  content's period, apply gain, remap channels, run any audio processor and
 *  push the result into the merger.
 *  @param wp Piece which emitted this audio.
 *  @param stream Stream within the piece that the audio came from.
 *  @param content_audio The audio data and its frame position in the content.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end()) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end()) {
		/* Keep only the part which falls before the content's end */
		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	/* Gain */

	if (content->gain() != 0) {
		/* Copy before applying gain so we don't modify the decoder's buffers */
		shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
		gain->apply_gain (content->gain ());
		content_audio.audio = gain;
	}

	/* Remap */

	content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());

	/* Process */

	if (_audio_processor) {
		content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
	}

	/* Push */

	_audio_merger.push (content_audio.audio, time);
	DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
	/* Record how far this stream has got, so pass() knows how much merged audio can be pulled */
	_stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
756
/** Handle the start of an image subtitle emitted by some piece's decoder:
 *  apply the content's offset and scale settings and add it to the set of
 *  active subtitles.
 *  @param wp Piece which emitted this subtitle.
 *  @param subtitle The subtitle and its start time in the content.
 */
void
Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	/* Apply content's subtitle offsets */
	subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
	subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();

	/* Apply content's subtitle scale */
	subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
	subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();

	/* Apply a corrective translation to keep the subtitle centred after that scale */
	subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
	subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);

	PlayerSubtitles ps;
	ps.image.push_back (subtitle.sub);
	DCPTime from (content_time_to_dcp (piece, subtitle.from()));

	_active_subtitles.add_from (wp, ps, from);
}
783
void
Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
{
	/* Apply the content's subtitle settings (offset, scale, outline, fonts)
	   to each incoming text subtitle and file the result with the active
	   subtitles, starting at the corresponding DCP time.
	*/
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The content has gone away; nothing to do */
		return;
	}

	PlayerSubtitles ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		/* Shift by the content's subtitle offsets */
		s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
		s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
		float const xs = piece->content->subtitle->x_scale();
		float const ys = piece->content->subtitle->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   1 / min (1 / xs, 1 / ys) is equivalent to max (xs, ys), so e.g.
		   xs = ys = 0.5 halves the size.  NOTE(review): the previous comment
		   here claimed that case scaled size by 2, which contradicts the
		   arithmetic — confirm which is intended.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* Only the `in' time is set here; the end time is presumably resolved
		   later via subtitle_stop / _active_subtitles — confirm against callers.
		*/
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
		ps.add_fonts (piece->content->subtitle->fonts ());
	}

	_active_subtitles.add_from (wp, ps, from);
}
822
823 void
824 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
825 {
826         if (!_active_subtitles.have (wp)) {
827                 return;
828         }
829
830         shared_ptr<Piece> piece = wp.lock ();
831         if (!piece) {
832                 return;
833         }
834
835         DCPTime const dcp_to = content_time_to_dcp (piece, to);
836
837         pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
838
839         if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
840                 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
841         }
842 }
843
844 void
845 Player::seek (DCPTime time, bool accurate)
846 {
847         if (_audio_processor) {
848                 _audio_processor->flush ();
849         }
850
851         _audio_merger.clear ();
852         _active_subtitles.clear ();
853
854         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
855                 if (time < i->content->position()) {
856                         /* Before; seek to 0 */
857                         i->decoder->seek (ContentTime(), accurate);
858                         i->done = false;
859                 } else if (i->content->position() <= time && time < i->content->end()) {
860                         /* During; seek to position */
861                         i->decoder->seek (dcp_to_content_time (i, time), accurate);
862                         i->done = false;
863                 } else {
864                         /* After; this piece is done */
865                         i->done = true;
866                 }
867         }
868
869         if (accurate) {
870                 _last_video_time = time;
871                 _last_audio_time = time;
872         } else {
873                 _last_video_time = optional<DCPTime>();
874                 _last_audio_time = optional<DCPTime>();
875         }
876
877         _black.set_position (time);
878         _silent.set_position (time);
879
880         _last_video.clear ();
881 }
882
883 void
884 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
885 {
886         optional<PositionImage> subtitles = subtitles_for_frame (time);
887         if (subtitles) {
888                 pv->set_subtitle (subtitles.get ());
889         }
890
891         Video (pv, time);
892
893         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
894                 _last_video_time = time + one_video_frame();
895                 _active_subtitles.clear_before (time);
896         }
897 }
898
899 void
900 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
901 {
902         Audio (data, time);
903         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate());
904 }
905
906 void
907 Player::fill_audio (DCPTimePeriod period)
908 {
909         if (period.from == period.to) {
910                 return;
911         }
912
913         DCPOMATIC_ASSERT (period.from < period.to);
914
915         DCPTime t = period.from;
916         while (t < period.to) {
917                 DCPTime block = min (DCPTime::from_seconds (0.5), period.to - t);
918                 Frame const samples = block.frames_round(_film->audio_frame_rate());
919                 if (samples) {
920                         shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
921                         silence->make_silent ();
922                         emit_audio (silence, t);
923                 }
924                 t += block;
925         }
926 }
927
DCPTime
/** @return the DCPTime occupied by a single frame at the film's video frame rate */
Player::one_video_frame () const
{
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}
933
934 pair<shared_ptr<AudioBuffers>, DCPTime>
935 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
936 {
937         DCPTime const discard_time = discard_to - time;
938         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
939         Frame remaining_frames = audio->frames() - discard_frames;
940         if (remaining_frames <= 0) {
941                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
942         }
943         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
944         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
945         return make_pair(cut, time + discard_time);
946 }