Remove debug.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_subtitles.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "compose.hpp"
51 #include <dcp/reel.h>
52 #include <dcp/reel_sound_asset.h>
53 #include <dcp/reel_subtitle_asset.h>
54 #include <dcp/reel_picture_asset.h>
55 #include <boost/foreach.hpp>
56 #include <stdint.h>
57 #include <algorithm>
58 #include <iostream>
59
60 #include "i18n.h"
61
62 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
63
64 using std::list;
65 using std::cout;
66 using std::min;
67 using std::max;
68 using std::min;
69 using std::vector;
70 using std::pair;
71 using std::map;
72 using std::make_pair;
73 using std::copy;
74 using boost::shared_ptr;
75 using boost::weak_ptr;
76 using boost::dynamic_pointer_cast;
77 using boost::optional;
78 using boost::scoped_ptr;
79
/** Construct a Player which emits the content of @p playlist for @p film.
 *  Wires up change signals, sizes the video container from the film,
 *  picks up the film's audio processor and seeks to the start.
 */
80 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
81         : _film (film)
82         , _playlist (playlist)
83         , _have_valid_pieces (false)
84         , _ignore_video (false)
85         , _ignore_audio (false)
86         , _always_burn_subtitles (false)
87         , _fast (false)
88         , _play_referenced (false)
89         , _audio_merger (_film->audio_frame_rate())
90 {
        /* Watch the film, the playlist and the playlist's content so we can
           invalidate our pieces and/or tell listeners when things change.
        */
91         _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
92         _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
93         _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
94         set_video_container_size (_film->frame_size ());
95
        /* Pick up the film's audio processor, if it has one */
96         film_changed (Film::AUDIO_PROCESSOR);
97
        /* Initial seek to the start; second argument presumably means "accurate" — confirm against Player::seek */
98         seek (DCPTime (), true);
99 }
100
/** Rebuild _pieces from the playlist: make a decoder for each piece of
 *  content, connect its output signals to this Player, record per-stream
 *  audio state, and note the periods covered by referenced DCP video/audio
 *  (which we must not fill with black/silence).
 */
101 void
102 Player::setup_pieces ()
103 {
104         _pieces.clear ();
105
106         BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
107
108                 if (!i->paths_valid ()) {
109                         continue;
110                 }
111
112                 shared_ptr<Decoder> decoder = decoder_factory (i, _film->log());
113                 FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());
114
115                 if (!decoder) {
116                         /* Not something that we can decode; e.g. Atmos content */
117                         continue;
118                 }
119
                 /* Honour the flags set by set_ignore_video() / the audio equivalent */
120                 if (decoder->video && _ignore_video) {
121                         decoder->video->set_ignore ();
122                 }
123
124                 if (decoder->audio && _ignore_audio) {
125                         decoder->audio->set_ignore ();
126                 }
127
128                 shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
129                 if (dcp && _play_referenced) {
130                         dcp->set_decode_referenced ();
131                 }
132
133                 shared_ptr<Piece> piece (new Piece (i, decoder, frc));
134                 _pieces.push_back (piece);
135
                 /* Connect decoder outputs to our handlers, passing a weak_ptr
                    to the Piece so handlers know where the data came from.
                 */
136                 if (decoder->video) {
137                         decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
138                 }
139
140                 if (decoder->audio) {
141                         decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
142                 }
143
144                 if (decoder->subtitle) {
145                         decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
146                         decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
147                         decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
148                 }
149         }
150
        /* Record, for each audio stream, the piece it belongs to and its start time */
151         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
152                 if (i->content->audio) {
153                         BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
154                                 _stream_states[j] = StreamState (i, i->content->position ());
155                         }
156                 }
157         }
158
        /* When not decoding referenced material, note the periods it occupies
           so that we do not fill them with black/silence.
        */
159         if (!_play_referenced) {
160                 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
161                         shared_ptr<DCPContent> dc = dynamic_pointer_cast<DCPContent> (i->content);
162                         if (dc) {
163                                 if (dc->reference_video()) {
164                                         _no_video.push_back (DCPTimePeriod (dc->position(), dc->end()));
165                                 }
166                                 if (dc->reference_audio()) {
167                                         _no_audio.push_back (DCPTimePeriod (dc->position(), dc->end()));
168                                 }
169                         }
170                 }
171         }
172
        /* Reset the emit positions; the pieces are now valid */
173         _last_video_time = DCPTime ();
174         _last_audio_time = DCPTime ();
175         _have_valid_pieces = true;
176 }
177
/** Handle a change to some content in the playlist.
 *  @param w Content which changed.
 *  @param property Identifier of the property which changed.
 *  @param frequent true if this change is likely to happen often (forwarded to the Changed signal).
 */
178 void
179 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
180 {
181         shared_ptr<Content> c = w.lock ();
182         if (!c) {
183                 return;
184         }
185
        /* These properties invalidate our pieces: new decoders etc. are needed */
186         if (
187                 property == ContentProperty::POSITION ||
188                 property == ContentProperty::LENGTH ||
189                 property == ContentProperty::TRIM_START ||
190                 property == ContentProperty::TRIM_END ||
191                 property == ContentProperty::PATH ||
192                 property == VideoContentProperty::FRAME_TYPE ||
193                 property == DCPContentProperty::NEEDS_ASSETS ||
194                 property == DCPContentProperty::NEEDS_KDM ||
195                 property == SubtitleContentProperty::COLOUR ||
196                 property == SubtitleContentProperty::OUTLINE ||
197                 property == SubtitleContentProperty::SHADOW ||
198                 property == SubtitleContentProperty::EFFECT_COLOUR ||
199                 property == FFmpegContentProperty::SUBTITLE_STREAM ||
200                 property == VideoContentProperty::COLOUR_CONVERSION
201                 ) {
202
203                 _have_valid_pieces = false;
204                 Changed (frequent);
205
        /* These properties change our output but the existing pieces can stay */
206         } else if (
207                 property == SubtitleContentProperty::LINE_SPACING ||
208                 property == SubtitleContentProperty::OUTLINE_WIDTH ||
209                 property == SubtitleContentProperty::Y_SCALE ||
210                 property == SubtitleContentProperty::FADE_IN ||
211                 property == SubtitleContentProperty::FADE_OUT ||
212                 property == ContentProperty::VIDEO_FRAME_RATE ||
213                 property == SubtitleContentProperty::USE ||
214                 property == SubtitleContentProperty::X_OFFSET ||
215                 property == SubtitleContentProperty::Y_OFFSET ||
216                 property == SubtitleContentProperty::X_SCALE ||
217                 property == SubtitleContentProperty::FONTS ||
218                 property == VideoContentProperty::CROP ||
219                 property == VideoContentProperty::SCALE ||
220                 property == VideoContentProperty::FADE_IN ||
221                 property == VideoContentProperty::FADE_OUT
222                 ) {
223
224                 Changed (frequent);
225         }
226 }
227
228 void
229 Player::set_video_container_size (dcp::Size s)
230 {
231         if (s == _video_container_size) {
232                 return;
233         }
234
235         _video_container_size = s;
236
237         _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
238         _black_image->make_black ();
239
240         Changed (false);
241 }
242
/** Handle a wholesale change to the playlist: our pieces are now stale,
 *  so mark them invalid and tell listeners.
 */
243 void
244 Player::playlist_changed ()
245 {
246         _have_valid_pieces = false;
247         Changed (false);
248 }
249
250 void
251 Player::film_changed (Film::Property p)
252 {
253         /* Here we should notice Film properties that affect our output, and
254            alert listeners that our output now would be different to how it was
255            last time we were run.
256         */
257
258         if (p == Film::CONTAINER) {
259                 Changed (false);
260         } else if (p == Film::VIDEO_FRAME_RATE) {
261                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
262                    so we need new pieces here.
263                 */
264                 _have_valid_pieces = false;
265                 Changed (false);
266         } else if (p == Film::AUDIO_PROCESSOR) {
267                 if (_film->audio_processor ()) {
268                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
269                 }
270         }
271 }
272
273 list<PositionImage>
274 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
275 {
276         list<PositionImage> all;
277
278         for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
279                 if (!i->image) {
280                         continue;
281                 }
282
283                 /* We will scale the subtitle up to fit _video_container_size */
284                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
285
286                 /* Then we need a corrective translation, consisting of two parts:
287                  *
288                  * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
289                  *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
290                  *
291                  * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
292                  *     (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
293                  *     (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
294                  *
295                  * Combining these two translations gives these expressions.
296                  */
297
298                 all.push_back (
299                         PositionImage (
300                                 i->image->scale (
301                                         scaled_size,
302                                         dcp::YUV_TO_RGB_REC601,
303                                         i->image->pixel_format (),
304                                         true,
305                                         _fast
306                                         ),
307                                 Position<int> (
308                                         lrint (_video_container_size.width * i->rectangle.x),
309                                         lrint (_video_container_size.height * i->rectangle.y)
310                                         )
311                                 )
312                         );
313         }
314
315         return all;
316 }
317
318 shared_ptr<PlayerVideo>
319 Player::black_player_video_frame () const
320 {
321         return shared_ptr<PlayerVideo> (
322                 new PlayerVideo (
323                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
324                         Crop (),
325                         optional<double> (),
326                         _video_container_size,
327                         _video_container_size,
328                         EYES_BOTH,
329                         PART_WHOLE,
330                         PresetColourConversion::all().front().conversion
331                 )
332         );
333 }
334
/** Convert a time on the DCP timeline to a video frame index within a piece
 *  of content, accounting for the content's position, start trim and any
 *  frame skip/repeat.
 *  @param piece Piece in question.
 *  @param t DCP time.
 *  @return Frame index at the content's video rate.
 */
335 Frame
336 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
337 {
338         DCPTime s = t - piece->content->position ();
339         s = min (piece->content->length_after_trim(), s);
340         s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
341
342         /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
343            then convert that ContentTime to frames at the content's rate.  However this fails for
344            situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
345            enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
346
347            Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
348         */
349         return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
350 }
351
/** Inverse of dcp_to_content_video: convert a content video frame index to
 *  a time on the DCP timeline, clamped to be non-negative.
 */
352 DCPTime
353 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
354 {
355         /* See comment in dcp_to_content_video */
356         DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
357         return max (DCPTime (), d + piece->content->position ());
358 }
359
/** Convert a time on the DCP timeline to an audio frame index within a
 *  piece of content, at the film's audio sample rate.
 */
360 Frame
361 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
362 {
363         DCPTime s = t - piece->content->position ();
364         s = min (piece->content->length_after_trim(), s);
365         /* See notes in dcp_to_content_video */
366         return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
367 }
368
/** Inverse of dcp_to_resampled_audio: convert a resampled audio frame index
 *  to a time on the DCP timeline, clamped to be non-negative.
 */
369 DCPTime
370 Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
371 {
372         /* See comment in dcp_to_content_video */
373         DCPTime const d = DCPTime::from_frames (f, _film->audio_frame_rate()) - DCPTime (piece->content->trim_start(), piece->frc);
374         return max (DCPTime (), d + piece->content->position ());
375 }
376
/** Convert a time on the DCP timeline to a ContentTime within a piece,
 *  accounting for position and start trim; clamped to be non-negative.
 */
377 ContentTime
378 Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
379 {
380         DCPTime s = t - piece->content->position ();
381         s = min (piece->content->length_after_trim(), s);
382         return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
383 }
384
/** Convert a ContentTime within a piece to a time on the DCP timeline,
 *  clamped to be non-negative.
 */
385 DCPTime
386 Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
387 {
388         return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
389 }
390
391 list<shared_ptr<Font> >
392 Player::get_subtitle_fonts ()
393 {
394         if (!_have_valid_pieces) {
395                 setup_pieces ();
396         }
397
398         list<shared_ptr<Font> > fonts;
399         BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
400                 if (p->content->subtitle) {
401                         /* XXX: things may go wrong if there are duplicate font IDs
402                            with different font files.
403                         */
404                         list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
405                         copy (f.begin(), f.end(), back_inserter (fonts));
406                 }
407         }
408
409         return fonts;
410 }
411
412 /** Set this player never to produce any video data */
413 void
414 Player::set_ignore_video ()
415 {
        /* Applied to each video decoder in setup_pieces().  NOTE(review):
           this does not clear _have_valid_pieces, so it looks intended to be
           called before the first setup_pieces() — confirm.
        */
416         _ignore_video = true;
417 }
418
419 /** Set whether or not this player should always burn text subtitles into the image,
420  *  regardless of the content settings.
421  *  @param burn true to always burn subtitles, false to obey content settings.
422  */
423 void
424 Player::set_always_burn_subtitles (bool burn)
425 {
        /* Read by subtitles_for_frame() when gathering burnt-in subtitles */
426         _always_burn_subtitles = burn;
427 }
428
/** Set this player to run in "fast" mode; _fast is forwarded to image
 *  scaling (see transform_image_subtitles).
 */
429 void
430 Player::set_fast ()
431 {
432         _fast = true;
        /* Force pieces to be rebuilt so the new setting takes effect */
433         _have_valid_pieces = false;
434 }
435
/** Set this player to decode referenced DCP content rather than skipping
 *  it (see setup_pieces(): set_decode_referenced() and the _no_video /
 *  _no_audio lists).
 */
436 void
437 Player::set_play_referenced ()
438 {
439         _play_referenced = true;
        /* Force pieces to be rebuilt so the new setting takes effect */
440         _have_valid_pieces = false;
441 }
442
/** @return Reel assets (picture/sound/subtitle) from any DCP content which
 *  is marked to be referenced rather than re-encoded, together with the
 *  periods of the DCP timeline that they will occupy.
 */
443 list<ReferencedReelAsset>
444 Player::get_reel_assets ()
445 {
446         list<ReferencedReelAsset> a;
447
448         BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
449                 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
450                 if (!j) {
451                         continue;
452                 }
453
454                 scoped_ptr<DCPDecoder> decoder;
455                 try {
456                         decoder.reset (new DCPDecoder (j, _film->log()));
457                 } catch (...) {
                        /* Could not make a decoder for this DCP; give up and
                           return whatever we have gathered so far.
                        */
458                         return a;
459                 }
460
                /* Offset of the current reel from the start of this content,
                   in frames at the film's video rate.
                */
461                 int64_t offset = 0;
462                 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
463
464                         DCPOMATIC_ASSERT (j->video_frame_rate ());
                        /* cfr: the content's rate; ffr: the film's rate */
465                         double const cfr = j->video_frame_rate().get();
466                         Frame const trim_start = j->trim_start().frames_round (cfr);
467                         Frame const trim_end = j->trim_end().frames_round (cfr);
468                         int const ffr = _film->video_frame_rate ();
469
470                         DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
471                         if (j->reference_video ()) {
472                                 shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
473                                 DCPOMATIC_ASSERT (ra);
                                /* Apply the content's trims by adjusting the asset's entry point and duration in place */
474                                 ra->set_entry_point (ra->entry_point() + trim_start);
475                                 ra->set_duration (ra->duration() - trim_start - trim_end);
476                                 a.push_back (
477                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
478                                         );
479                         }
480
481                         if (j->reference_audio ()) {
482                                 shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
483                                 DCPOMATIC_ASSERT (ra);
484                                 ra->set_entry_point (ra->entry_point() + trim_start);
485                                 ra->set_duration (ra->duration() - trim_start - trim_end);
486                                 a.push_back (
487                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
488                                         );
489                         }
490
491                         if (j->reference_subtitle ()) {
492                                 shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
493                                 DCPOMATIC_ASSERT (ra);
494                                 ra->set_entry_point (ra->entry_point() + trim_start);
495                                 ra->set_duration (ra->duration() - trim_start - trim_end);
496                                 a.push_back (
497                                         ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
498                                         );
499                         }
500
501                         /* Assume that main picture duration is the length of the reel */
502                         offset += k->main_picture()->duration ();
503                 }
504         }
505
506         return a;
507 }
508
/** Do some work: fill a little black/silence if needed, run the decoder of
 *  the piece with the earliest position, then emit any audio that is ready.
 *  @return true if there is nothing left to do (no undone piece and nothing
 *  was filled).
 */
509 bool
510 Player::pass ()
511 {
512         if (!_have_valid_pieces) {
513                 setup_pieces ();
514         }
515
        /* Find the undone piece whose decoder has the earliest position on
           the DCP timeline; that is the one which should produce data next.
        */
516         shared_ptr<Piece> earliest;
517         DCPTime earliest_content;
518
519         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
520                 if (!i->done) {
521                         DCPTime const t = content_time_to_dcp (i, i->decoder->position());
522                         if (!earliest || t < earliest_content) {
523                                 earliest_content = t;
524                                 earliest = i;
525                         }
526                 }
527         }
528
529         /* Fill towards the next thing that might happen (or the end of the playlist).  This is to fill gaps between content,
530            NOT to fill gaps within content (the latter is done in ::video())
531         */
532         DCPTime fill_towards = earliest ? earliest_content : _playlist->length().ceil(_film->video_frame_rate());
533
534         /* Work out where to fill video from */
535         optional<DCPTime> video_fill_from;
536         if (_last_video_time) {
537                 /* Fill from the last video or seek time */
538                 video_fill_from = _last_video_time;
539         }
540
541         bool filled = false;
542         /* Fill some black if we would emit before the earliest piece of content.  This is so we act like a phantom
543            Piece which emits black in spaces (we only emit if we are the earliest thing)
544         */
545         if (video_fill_from && (!earliest || *video_fill_from < earliest_content) && ((fill_towards - *video_fill_from)) >= one_video_frame()) {
                /* Skip any period covered by referenced DCP video (_no_video) */
546                 list<DCPTimePeriod> p = subtract(DCPTimePeriod(*video_fill_from, *video_fill_from + one_video_frame()), _no_video);
547                 if (!p.empty ()) {
548                         emit_video (black_player_video_frame(), p.front().from);
549                         filled = true;
550                 }
551         } else if (_playlist->length() == DCPTime()) {
552                 /* Special case of an empty Film; just give one black frame */
553                 emit_video (black_player_video_frame(), DCPTime());
554                 filled = true;
555         }
556
557         optional<DCPTime> audio_fill_from;
558         if (_last_audio_time) {
559                 /* Fill from the last audio or seek time */
560                 audio_fill_from = _last_audio_time;
561         }
562
        /* Allow for the audio delay of the earliest piece when deciding how far to fill silence */
563         DCPTime audio_fill_towards = fill_towards;
564         if (earliest && earliest->content->audio) {
565                 audio_fill_towards += DCPTime::from_seconds (earliest->content->audio->delay() / 1000.0);
566         }
567
568         if (audio_fill_from && audio_fill_from < audio_fill_towards) {
                /* Fill at most one video frame's worth of silence per pass */
569                 DCPTimePeriod period (*audio_fill_from, audio_fill_towards);
570                 if (period.duration() > one_video_frame()) {
571                         period.to = period.from + one_video_frame();
572                 }
573                 list<DCPTimePeriod> p = subtract(period, _no_audio);
574                 if (!p.empty ()) {
575                         fill_audio (p.front());
576                         filled = true;
577                 }
578         }
579
580         if (earliest) {
581                 earliest->done = earliest->decoder->pass ();
582         }
583
584         /* Emit any audio that is ready */
585
        /* We can only pull audio up to the earliest point any undone stream
           has been pushed to, otherwise later pushes could land before
           something we have already emitted.
        */
586         DCPTime pull_to = _playlist->length ();
587         for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
588                 if (!i->second.piece->done && i->second.last_push_end < pull_to) {
589                         pull_to = i->second.last_push_end;
590                 }
591         }
592
593         list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_to);
594         for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
595                 if (_last_audio_time && i->second < *_last_audio_time) {
596                         /* There has been an accurate seek and we have received some audio before the seek time;
597                            discard it.
598                         */
599                         pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
600                         if (!cut.first) {
601                                 continue;
602                         }
603                         *i = cut;
604                 }
605
606                 if (_last_audio_time) {
                        /* Pad any gap between the last audio emitted and this block */
607                         fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
608                 }
609
610                 emit_audio (i->first, i->second);
611         }
612
613         return !earliest && !filled;
614 }
615
616 optional<PositionImage>
617 Player::subtitles_for_frame (DCPTime time) const
618 {
619         list<PositionImage> subtitles;
620
621         BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
622
623                 /* Image subtitles */
624                 list<PositionImage> c = transform_image_subtitles (i.image);
625                 copy (c.begin(), c.end(), back_inserter (subtitles));
626
627                 /* Text subtitles (rendered to an image) */
628                 if (!i.text.empty ()) {
629                         list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
630                         copy (s.begin(), s.end(), back_inserter (subtitles));
631                 }
632         }
633
634         if (subtitles.empty ()) {
635                 return optional<PositionImage> ();
636         }
637
638         return merge (subtitles);
639 }
640
/** Handler for video data emitted by a piece's decoder.  Discards frames
 *  which fall outside the content's period or before the last accurate
 *  seek, fills any gap since the last emitted video, then builds and emits
 *  a PlayerVideo.
 *  @param wp Piece the data came from.
 *  @param video Video data.
 */
641 void
642 Player::video (weak_ptr<Piece> wp, ContentVideo video)
643 {
644         shared_ptr<Piece> piece = wp.lock ();
645         if (!piece) {
646                 return;
647         }
648
        /* If the rate change involves a skip, drop every other frame */
649         FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
650         if (frc.skip && (video.frame % 2) == 1) {
651                 return;
652         }
653
654         /* Time and period of the frame we will emit */
655         DCPTime const time = content_video_to_dcp (piece, video.frame);
656         DCPTimePeriod const period (time, time + one_video_frame());
657
658         /* Discard if it's outside the content's period or if it's before the last accurate seek */
659         if (
660                 time < piece->content->position() ||
661                 time >= piece->content->end() ||
662                 (_last_video_time && time < *_last_video_time)) {
663                 return;
664         }
665
666         /* Fill gaps that we discover now that we have some video which needs to be emitted */
667
668         optional<DCPTime> fill_to;
669         if (_last_video_time) {
670                 fill_to = _last_video_time;
671         }
672
673         if (fill_to) {
674                 /* XXX: this may not work for 3D */
675                 BOOST_FOREACH (DCPTimePeriod i, subtract(DCPTimePeriod (*fill_to, time), _no_video)) {
676                         for (DCPTime j = i.from; j < i.to; j += one_video_frame()) {
                                /* Repeat the last frame we emitted for this piece if there is one; otherwise use black */
677                                 LastVideoMap::const_iterator k = _last_video.find (wp);
678                                 if (k != _last_video.end ()) {
679                                         emit_video (k->second, j);
680                                 } else {
681                                         emit_video (black_player_video_frame(), j);
682                                 }
683                         }
684                 }
685         }
686
        /* Remember this frame (for future gap filling) and emit it */
687         _last_video[wp].reset (
688                 new PlayerVideo (
689                         video.image,
690                         piece->content->video->crop (),
691                         piece->content->video->fade (video.frame),
692                         piece->content->video->scale().size (
693                                 piece->content->video, _video_container_size, _film->frame_size ()
694                                 ),
695                         _video_container_size,
696                         video.eyes,
697                         video.part,
698                         piece->content->video->colour_conversion ()
699                         )
700                 );
701
702         emit_video (_last_video[wp], time);
703 }
704
705 /** Do our common processing on some audio */
/** Apply gain, remap to the DCP channel layout, run any audio processor,
 *  pad gaps caused by audio delay and push the result into the merger.
 *  @param content Audio content the data came from.
 *  @param stream Stream the data came from.
 *  @param content_audio Audio data; must contain at least one frame.
 *  @param time Time of the first sample on the DCP timeline.
 */
706 void
707 Player::audio_transform (shared_ptr<AudioContent> content, AudioStreamPtr stream, ContentAudio content_audio, DCPTime time)
708 {
709         DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
710
711         /* Gain */
712
713         if (content->gain() != 0) {
                /* Copy the buffers before applying gain so the decoder's data is not modified */
714                 shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
715                 gain->apply_gain (content->gain ());
716                 content_audio.audio = gain;
717         }
718
719         /* Remap */
720
721         shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), content_audio.audio->frames()));
722         dcp_mapped->make_silent ();
723
        /* Accumulate each input channel into every DCP channel which has a non-zero mapping coefficient */
724         AudioMapping map = stream->mapping ();
725         for (int i = 0; i < map.input_channels(); ++i) {
726                 for (int j = 0; j < dcp_mapped->channels(); ++j) {
727                         if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
728                                 dcp_mapped->accumulate_channel (
729                                         content_audio.audio.get(),
730                                         i,
731                                         static_cast<dcp::Channel> (j),
732                                         map.get (i, static_cast<dcp::Channel> (j))
733                                         );
734                         }
735                 }
736         }
737
738         content_audio.audio = dcp_mapped;
739
740         /* Process */
741
742         if (_audio_processor) {
743                 content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
744         }
745
746         /* Pad any gap which may be caused by audio delay */
747
748         if (_last_audio_time) {
749                 fill_audio (DCPTimePeriod (*_last_audio_time, time));
750         }
751
752         /* Push */
753
754         _audio_merger.push (content_audio.audio, time);
755         DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
        /* Record how far this stream has now been pushed, so pass() knows how much it can safely pull */
756         _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
757 }
758
/** Handler for audio data emitted by a piece's decoder: work out its time
 *  on the DCP timeline, trim it to the content's period and hand it to
 *  audio_transform().
 *  @param wp Piece the data came from.
 *  @param stream Stream the data came from.
 *  @param content_audio Audio data; must contain at least one frame.
 */
759 void
760 Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
761 {
762         DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);
763
764         shared_ptr<Piece> piece = wp.lock ();
765         if (!piece) {
766                 return;
767         }
768
769         shared_ptr<AudioContent> content = piece->content->audio;
770         DCPOMATIC_ASSERT (content);
771
772         /* Compute time in the DCP */
773         DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
774         /* And the end of this block in the DCP */
775         DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
776
777         /* Remove anything that comes before the start or after the end of the content */
778         if (time < piece->content->position()) {
779                 pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
780                 if (!cut.first) {
781                         /* This audio is entirely discarded */
782                         return;
783                 }
784                 content_audio.audio = cut.first;
785                 time = cut.second;
786         } else if (time > piece->content->end()) {
787                 /* Discard it all */
788                 return;
789         } else if (end > piece->content->end()) {
                /* Truncate the block so that it finishes at the end of the content */
790                 Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
791                 if (remaining_frames == 0) {
792                         return;
793                 }
794                 shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
795                 cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
796                 content_audio.audio = cut;
797         }
798
799         audio_transform (content, stream, content_audio, time);
800 }
801
802 void
803 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
804 {
805         shared_ptr<Piece> piece = wp.lock ();
806         if (!piece) {
807                 return;
808         }
809
810         /* Apply content's subtitle offsets */
811         subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
812         subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
813
814         /* Apply content's subtitle scale */
815         subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
816         subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
817
818         /* Apply a corrective translation to keep the subtitle centred after that scale */
819         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
820         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
821
822         PlayerSubtitles ps;
823         ps.image.push_back (subtitle.sub);
824         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
825
826         _active_subtitles.add_from (wp, ps, from);
827 }
828
/** A text subtitle has started: apply the content's subtitle offsets, scale
 *  and outline settings to each string and add the result to the set of
 *  active subtitles.
 */
void
Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone away; nothing to do */
		return;
	}

	PlayerSubtitles ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		/* Apply the content's subtitle position offsets */
		s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
		s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
		float const xs = piece->content->subtitle->x_scale();
		float const ys = piece->content->subtitle->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we scale size by 2.
		   NOTE(review): 1 / min (1 / xs, 1 / ys) evaluates to max (xs, ys), so for
		   xs = ys = 0.5 this actually multiplies size by 0.5, not 2 — confirm
		   whether the example above or the expression reflects the intent.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* Set the string's in-time; tcr of 1000 gives millisecond resolution */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
		ps.add_fonts (piece->content->subtitle->fonts ());
	}

	_active_subtitles.add_from (wp, ps, from);
}
867
868 void
869 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
870 {
871         if (!_active_subtitles.have (wp)) {
872                 return;
873         }
874
875         shared_ptr<Piece> piece = wp.lock ();
876         if (!piece) {
877                 return;
878         }
879
880         DCPTime const dcp_to = content_time_to_dcp (piece, to);
881
882         pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
883
884         if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
885                 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
886         }
887 }
888
889 void
890 Player::seek (DCPTime time, bool accurate)
891 {
892         if (_audio_processor) {
893                 _audio_processor->flush ();
894         }
895
896         _audio_merger.clear ();
897         _active_subtitles.clear ();
898
899         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
900                 if (time < i->content->position()) {
901                         /* Before; seek to 0 */
902                         i->decoder->seek (ContentTime(), accurate);
903                         i->done = false;
904                 } else if (i->content->position() <= time && time < i->content->end()) {
905                         /* During; seek to position */
906                         i->decoder->seek (dcp_to_content_time (i, time), accurate);
907                         i->done = false;
908                 } else {
909                         /* After; this piece is done */
910                         i->done = true;
911                 }
912         }
913
914         if (accurate) {
915                 _last_video_time = time;
916                 _last_audio_time = time;
917         } else {
918                 _last_video_time = optional<DCPTime>();
919                 _last_audio_time = optional<DCPTime>();
920         }
921 }
922
923 void
924 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
925 {
926         optional<PositionImage> subtitles = subtitles_for_frame (time);
927         if (subtitles) {
928                 pv->set_subtitle (subtitles.get ());
929         }
930
931         Video (pv, time);
932
933         if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
934                 _last_video_time = time + one_video_frame();
935                 _active_subtitles.clear_before (time);
936         }
937 }
938
939 void
940 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
941 {
942         Audio (data, time);
943         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate ());
944 }
945
946 void
947 Player::fill_audio (DCPTimePeriod period)
948 {
949         if (period.from == period.to) {
950                 return;
951         }
952
953         BOOST_FOREACH (DCPTimePeriod i, subtract(period, _no_audio)) {
954                 DCPTime t = i.from;
955                 while (t < i.to) {
956                         DCPTime block = min (DCPTime::from_seconds (0.5), i.to - t);
957                         Frame const samples = block.frames_round(_film->audio_frame_rate());
958                         if (samples) {
959                                 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
960                                 silence->make_silent ();
961                                 emit_audio (silence, t);
962                         }
963                         t += block;
964                 }
965         }
966 }
967
968 DCPTime
969 Player::one_video_frame () const
970 {
971         return DCPTime::from_frames (1, _film->video_frame_rate ());
972 }
973
974 pair<shared_ptr<AudioBuffers>, DCPTime>
975 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
976 {
977         DCPTime const discard_time = discard_to - time;
978         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
979         Frame remaining_frames = audio->frames() - discard_frames;
980         if (remaining_frames <= 0) {
981                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
982         }
983         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
984         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
985         return make_pair(cut, time + discard_time);
986 }