Fix missing subtitle in some cases.
[dcpomatic.git] / src / lib / player.cc
1 /*
2     Copyright (C) 2013-2017 Carl Hetherington <cth@carlh.net>
3
4     This file is part of DCP-o-matic.
5
6     DCP-o-matic is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     DCP-o-matic is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
18
19 */
20
21 #include "player.h"
22 #include "film.h"
23 #include "audio_buffers.h"
24 #include "content_audio.h"
25 #include "dcp_content.h"
26 #include "job.h"
27 #include "image.h"
28 #include "raw_image_proxy.h"
29 #include "ratio.h"
30 #include "log.h"
31 #include "render_subtitles.h"
32 #include "config.h"
33 #include "content_video.h"
34 #include "player_video.h"
35 #include "frame_rate_change.h"
36 #include "audio_processor.h"
37 #include "playlist.h"
38 #include "referenced_reel_asset.h"
39 #include "decoder_factory.h"
40 #include "decoder.h"
41 #include "video_decoder.h"
42 #include "audio_decoder.h"
43 #include "subtitle_content.h"
44 #include "subtitle_decoder.h"
45 #include "ffmpeg_content.h"
46 #include "audio_content.h"
47 #include "content_subtitle.h"
48 #include "dcp_decoder.h"
49 #include "image_decoder.h"
50 #include "resampler.h"
51 #include "compose.hpp"
52 #include <dcp/reel.h>
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <boost/foreach.hpp>
57 #include <stdint.h>
58 #include <algorithm>
59 #include <iostream>
60
61 #include "i18n.h"
62
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
64
65 using std::list;
66 using std::cout;
67 using std::min;
68 using std::max;
69 using std::min;
70 using std::vector;
71 using std::pair;
72 using std::map;
73 using std::make_pair;
74 using std::copy;
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
80
/** Construct a Player which maps the given playlist, in the context of the
 *  given film, onto a stream of video frames and audio samples.
 *  @param film Film whose settings (frame rate, container, audio setup) shape the output.
 *  @param playlist Content to play.
 */
Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
        : _film (film)
        , _playlist (playlist)
        , _have_valid_pieces (false)
        , _ignore_video (false)
        , _ignore_audio (false)
        , _always_burn_subtitles (false)
        , _fast (false)
        , _play_referenced (false)
        , _last_seek_accurate (true)
        , _audio_merger (_film->audio_frame_rate())
{
        /* Listen for changes to the film/playlist so we can invalidate our
           pieces and/or tell our own listeners that the output has changed.
        */
        _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
        _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
        _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
        set_video_container_size (_film->frame_size ());

        /* Pick up the film's audio processor (if any) via the normal change path */
        film_changed (Film::AUDIO_PROCESSOR);

        /* Start decoding from the beginning */
        seek (DCPTime (), true);
}
102
/** Rebuild _pieces from the playlist: create a decoder for each piece of
 *  content, wire the decoders' emission signals to our handlers and reset
 *  per-pass state.  Called lazily whenever _have_valid_pieces is false.
 */
void
Player::setup_pieces ()
{
        _pieces.clear ();

        BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {

                /* Skip content whose files are missing */
                if (!i->paths_valid ()) {
                        continue;
                }

                shared_ptr<Decoder> decoder = decoder_factory (i, _film->log());
                FrameRateChange frc (i->active_video_frame_rate(), _film->video_frame_rate());

                if (!decoder) {
                        /* Not something that we can decode; e.g. Atmos content */
                        continue;
                }

                if (decoder->video && _ignore_video) {
                        decoder->video->set_ignore ();
                }

                if (decoder->audio && _ignore_audio) {
                        decoder->audio->set_ignore ();
                }

                /* If we are previewing referenced DCP content we must actually decode it */
                shared_ptr<DCPDecoder> dcp = dynamic_pointer_cast<DCPDecoder> (decoder);
                if (dcp && _play_referenced) {
                        dcp->set_decode_referenced ();
                }

                shared_ptr<Piece> piece (new Piece (i, decoder, frc));
                _pieces.push_back (piece);

                /* Connect with weak_ptr so a dropped Piece does not keep the handlers alive */
                if (decoder->video) {
                        decoder->video->Data.connect (bind (&Player::video, this, weak_ptr<Piece> (piece), _1));
                }

                if (decoder->audio) {
                        decoder->audio->Data.connect (bind (&Player::audio, this, weak_ptr<Piece> (piece), _1, _2));
                }

                if (decoder->subtitle) {
                        decoder->subtitle->ImageStart.connect (bind (&Player::image_subtitle_start, this, weak_ptr<Piece> (piece), _1));
                        decoder->subtitle->TextStart.connect (bind (&Player::text_subtitle_start, this, weak_ptr<Piece> (piece), _1));
                        decoder->subtitle->Stop.connect (bind (&Player::subtitle_stop, this, weak_ptr<Piece> (piece), _1));
                }
        }

        /* Initialise the push state of every audio stream to its piece's start position */
        BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
                if (i->content->audio) {
                        BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams()) {
                                _stream_states[j] = StreamState (i, i->content->position ());
                        }
                }
        }

        /* Note periods covered by referenced DCPs: we must not fill black/silence there */
        if (!_play_referenced) {
                BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
                        shared_ptr<DCPContent> dc = dynamic_pointer_cast<DCPContent> (i->content);
                        if (dc) {
                                if (dc->reference_video()) {
                                        _no_video.push_back (DCPTimePeriod (dc->position(), dc->end()));
                                }
                                if (dc->reference_audio()) {
                                        _no_audio.push_back (DCPTimePeriod (dc->position(), dc->end()));
                                }
                        }
                }
        }

        /* Forget what we last emitted; a fresh set of pieces means a fresh pass */
        _last_video_time = optional<DCPTime> ();
        _last_audio_time = optional<DCPTime> ();
        _have_valid_pieces = true;
}
179
/** Handler for a change to a property of some content in the playlist.
 *  @param w Content that changed.
 *  @param property Property that changed.
 *  @param frequent true if this change is one of a stream (e.g. from a slider drag),
 *  passed on to our Changed signal so listeners can throttle updates.
 */
void
Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
{
        shared_ptr<Content> c = w.lock ();
        if (!c) {
                return;
        }

        /* These properties invalidate the decoder setup, so the pieces must be
           rebuilt before the next pass/seek.
        */
        if (
                property == ContentProperty::POSITION ||
                property == ContentProperty::LENGTH ||
                property == ContentProperty::TRIM_START ||
                property == ContentProperty::TRIM_END ||
                property == ContentProperty::PATH ||
                property == VideoContentProperty::FRAME_TYPE ||
                property == DCPContentProperty::NEEDS_ASSETS ||
                property == DCPContentProperty::NEEDS_KDM ||
                property == SubtitleContentProperty::COLOUR ||
                property == SubtitleContentProperty::OUTLINE ||
                property == SubtitleContentProperty::SHADOW ||
                property == SubtitleContentProperty::EFFECT_COLOUR ||
                property == FFmpegContentProperty::SUBTITLE_STREAM ||
                property == VideoContentProperty::COLOUR_CONVERSION
                ) {

                _have_valid_pieces = false;
                Changed (frequent);

        /* These only alter how existing decoded data is presented, so the
           pieces can stay; just tell listeners the output will differ.
        */
        } else if (
                property == SubtitleContentProperty::LINE_SPACING ||
                property == SubtitleContentProperty::OUTLINE_WIDTH ||
                property == SubtitleContentProperty::Y_SCALE ||
                property == SubtitleContentProperty::FADE_IN ||
                property == SubtitleContentProperty::FADE_OUT ||
                property == ContentProperty::VIDEO_FRAME_RATE ||
                property == SubtitleContentProperty::USE ||
                property == SubtitleContentProperty::X_OFFSET ||
                property == SubtitleContentProperty::Y_OFFSET ||
                property == SubtitleContentProperty::X_SCALE ||
                property == SubtitleContentProperty::FONTS ||
                property == VideoContentProperty::CROP ||
                property == VideoContentProperty::SCALE ||
                property == VideoContentProperty::FADE_IN ||
                property == VideoContentProperty::FADE_OUT
                ) {

                Changed (frequent);
        }
}
229
230 void
231 Player::set_video_container_size (dcp::Size s)
232 {
233         if (s == _video_container_size) {
234                 return;
235         }
236
237         _video_container_size = s;
238
239         _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
240         _black_image->make_black ();
241
242         Changed (false);
243 }
244
/** Handler for wholesale changes to the playlist (content added/removed):
 *  invalidate our pieces and tell listeners the output has changed.
 */
void
Player::playlist_changed ()
{
        _have_valid_pieces = false;
        Changed (false);
}
251
252 void
253 Player::film_changed (Film::Property p)
254 {
255         /* Here we should notice Film properties that affect our output, and
256            alert listeners that our output now would be different to how it was
257            last time we were run.
258         */
259
260         if (p == Film::CONTAINER) {
261                 Changed (false);
262         } else if (p == Film::VIDEO_FRAME_RATE) {
263                 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
264                    so we need new pieces here.
265                 */
266                 _have_valid_pieces = false;
267                 Changed (false);
268         } else if (p == Film::AUDIO_PROCESSOR) {
269                 if (_film->audio_processor ()) {
270                         _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
271                 }
272         }
273 }
274
275 list<PositionImage>
276 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
277 {
278         list<PositionImage> all;
279
280         for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
281                 if (!i->image) {
282                         continue;
283                 }
284
285                 /* We will scale the subtitle up to fit _video_container_size */
286                 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
287
288                 /* Then we need a corrective translation, consisting of two parts:
289                  *
290                  * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
291                  *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
292                  *
293                  * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
294                  *     (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
295                  *     (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
296                  *
297                  * Combining these two translations gives these expressions.
298                  */
299
300                 all.push_back (
301                         PositionImage (
302                                 i->image->scale (
303                                         scaled_size,
304                                         dcp::YUV_TO_RGB_REC601,
305                                         i->image->pixel_format (),
306                                         true,
307                                         _fast
308                                         ),
309                                 Position<int> (
310                                         lrint (_video_container_size.width * i->rectangle.x),
311                                         lrint (_video_container_size.height * i->rectangle.y)
312                                         )
313                                 )
314                         );
315         }
316
317         return all;
318 }
319
320 shared_ptr<PlayerVideo>
321 Player::black_player_video_frame () const
322 {
323         return shared_ptr<PlayerVideo> (
324                 new PlayerVideo (
325                         shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
326                         Crop (),
327                         optional<double> (),
328                         _video_container_size,
329                         _video_container_size,
330                         EYES_BOTH,
331                         PART_WHOLE,
332                         PresetColourConversion::all().front().conversion
333                 )
334         );
335 }
336
/** Convert a DCP time to a frame index in a piece's video content.
 *  @param piece Piece concerned.
 *  @param t DCP time.
 *  @return Frame index in the content's video, clamped to the piece's extent.
 */
Frame
Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
{
        /* Time since the start of the piece, clamped to its trimmed length,
           then shifted forward by the trim and floored at zero.
        */
        DCPTime s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(), s);
        s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));

        /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
           then convert that ContentTime to frames at the content's rate.  However this fails for
           situations like content at 29.9978733fps, DCP at 30fps.  The accuracy of the Time type is not
           enough to distinguish between the two with low values of time (e.g. 3200 in Time units).

           Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
        */
        return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
}
353
/** Convert a frame index in a piece's video content to a DCP time.
 *  Inverse of dcp_to_content_video (modulo rounding).
 *  @param piece Piece concerned.
 *  @param f Frame index in the content's video.
 *  @return DCP time, floored at zero.
 */
DCPTime
Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
        /* See comment in dcp_to_content_video */
        DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
        return max (DCPTime (), d + piece->content->position ());
}
361
/** Convert a DCP time to a frame index in a piece's audio, after resampling
 *  to the film's audio rate.
 *  @param piece Piece concerned.
 *  @param t DCP time.
 *  @return Audio frame index at the film's audio frame rate.
 */
Frame
Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
{
        DCPTime s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(), s);
        /* See notes in dcp_to_content_video */
        return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
}
370
/** Convert a resampled-audio frame index back to a DCP time.
 *  Inverse of dcp_to_resampled_audio (modulo rounding).
 *  @param piece Piece concerned.
 *  @param f Audio frame index at the film's audio frame rate.
 *  @return DCP time, floored at zero.
 */
DCPTime
Player::resampled_audio_to_dcp (shared_ptr<const Piece> piece, Frame f) const
{
        /* See comment in dcp_to_content_video */
        DCPTime const d = DCPTime::from_frames (f, _film->audio_frame_rate()) - DCPTime (piece->content->trim_start(), piece->frc);
        return max (DCPTime (), d + piece->content->position ());
}
378
/** Convert a DCP time to a time within a piece's content.
 *  @param piece Piece concerned.
 *  @param t DCP time.
 *  @return Content time, clamped to the piece's extent and floored at zero.
 */
ContentTime
Player::dcp_to_content_time (shared_ptr<const Piece> piece, DCPTime t) const
{
        DCPTime s = t - piece->content->position ();
        s = min (piece->content->length_after_trim(), s);
        return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
}
386
/** Convert a time within a piece's content to a DCP time.
 *  Inverse of dcp_to_content_time (modulo rounding).
 *  @param piece Piece concerned.
 *  @param t Content time.
 *  @return DCP time, floored at zero.
 */
DCPTime
Player::content_time_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
{
        return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
}
392
393 list<shared_ptr<Font> >
394 Player::get_subtitle_fonts ()
395 {
396         if (!_have_valid_pieces) {
397                 setup_pieces ();
398         }
399
400         list<shared_ptr<Font> > fonts;
401         BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
402                 if (p->content->subtitle) {
403                         /* XXX: things may go wrong if there are duplicate font IDs
404                            with different font files.
405                         */
406                         list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
407                         copy (f.begin(), f.end(), back_inserter (fonts));
408                 }
409         }
410
411         return fonts;
412 }
413
/** Set this player never to produce any video data.
 *  Used e.g. when only the audio side of the playlist is wanted.
 *  Note: this only takes effect for pieces set up after the call.
 */
void
Player::set_ignore_video ()
{
        _ignore_video = true;
}
420
/** Set whether or not this player should always burn text subtitles into the image,
 *  regardless of the content settings.
 *  @param burn true to always burn subtitles, false to obey content settings.
 */
void
Player::set_always_burn_subtitles (bool burn)
{
        _always_burn_subtitles = burn;
}
430
/** Set this player to play as fast as possible (e.g. lower-quality scaling),
 *  rather than in a way suited for final output.  Invalidates the pieces so
 *  the setting reaches the decoders.
 */
void
Player::set_fast ()
{
        _fast = true;
        _have_valid_pieces = false;
}
437
/** Set this player to decode and play DCP content even when it is marked as
 *  referenced (i.e. to be passed through untouched into the output DCP).
 *  Used for preview.  Invalidates the pieces so decoders are re-made.
 */
void
Player::set_play_referenced ()
{
        _play_referenced = true;
        _have_valid_pieces = false;
}
444
/** @return reel assets from any DCP content in the playlist which is marked
 *  to be referenced (rather than re-encoded), with trims applied and each
 *  asset tagged with the DCP time period it occupies in the output.
 */
list<ReferencedReelAsset>
Player::get_reel_assets ()
{
        list<ReferencedReelAsset> a;

        BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
                /* Only DCP content can be referenced */
                shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
                if (!j) {
                        continue;
                }

                scoped_ptr<DCPDecoder> decoder;
                try {
                        decoder.reset (new DCPDecoder (j, _film->log()));
                } catch (...) {
                        /* NOTE(review): any failure to open the DCP silently returns the
                           assets collected so far, skipping all remaining content — confirm
                           this partial-result behaviour is intended.
                        */
                        return a;
                }

                /* Offset, in frames at the film's video rate, of the current reel
                   from the start of this content.
                */
                int64_t offset = 0;
                BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {

                        DCPOMATIC_ASSERT (j->video_frame_rate ());
                        double const cfr = j->video_frame_rate().get();
                        /* Trims expressed as frame counts at the content's own rate */
                        Frame const trim_start = j->trim_start().frames_round (cfr);
                        Frame const trim_end = j->trim_end().frames_round (cfr);
                        int const ffr = _film->video_frame_rate ();

                        /* DCP time at which this reel starts in the output */
                        DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
                        if (j->reference_video ()) {
                                shared_ptr<dcp::ReelAsset> ra = k->main_picture ();
                                DCPOMATIC_ASSERT (ra);
                                /* Push the entry point past the start trim and shorten by both trims */
                                ra->set_entry_point (ra->entry_point() + trim_start);
                                ra->set_duration (ra->duration() - trim_start - trim_end);
                                a.push_back (
                                        ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
                                        );
                        }

                        if (j->reference_audio ()) {
                                shared_ptr<dcp::ReelAsset> ra = k->main_sound ();
                                DCPOMATIC_ASSERT (ra);
                                ra->set_entry_point (ra->entry_point() + trim_start);
                                ra->set_duration (ra->duration() - trim_start - trim_end);
                                a.push_back (
                                        ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
                                        );
                        }

                        if (j->reference_subtitle ()) {
                                shared_ptr<dcp::ReelAsset> ra = k->main_subtitle ();
                                DCPOMATIC_ASSERT (ra);
                                ra->set_entry_point (ra->entry_point() + trim_start);
                                ra->set_duration (ra->duration() - trim_start - trim_end);
                                a.push_back (
                                        ReferencedReelAsset (ra, DCPTimePeriod (from, from + DCPTime::from_frames (ra->duration(), ffr)))
                                        );
                        }

                        /* Assume that main picture duration is the length of the reel */
                        /* NOTE(review): main_picture() is dereferenced here without the
                           assert used above — a reel with no picture would crash; confirm
                           that cannot happen.
                        */
                        offset += k->main_picture()->duration ();
                }
        }

        return a;
}
510
/** Run one pass of the player: ask the earliest decoder for more data, fill
 *  any black/silent gaps between content, and emit whatever audio is ready.
 *  @return true if the pass produced nothing and there is nothing left to do
 *  (i.e. playback is finished), otherwise false.
 */
bool
Player::pass ()
{
        if (!_have_valid_pieces) {
                setup_pieces ();
        }

        /* Find the piece which is earliest in DCP time and not yet finished;
           that is the one we ask for more data.
        */
        shared_ptr<Piece> earliest;
        DCPTime earliest_content;

        BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
                if (!i->done) {
                        DCPTime const t = i->content->position() + DCPTime (i->decoder->position(), i->frc);
                        if (!earliest || t < earliest_content) {
                                earliest_content = t;
                                earliest = i;
                        }
                }
        }

        if (earliest) {
                earliest->done = earliest->decoder->pass ();
                if (earliest->done && earliest->content->audio) {
                        /* Flush the Player audio system for this piece */
                        BOOST_FOREACH (AudioStreamPtr i, earliest->content->audio->streams()) {
                                audio_flush (earliest, i);
                        }
                }
        }

        /* Fill towards the next thing that might happen (or the end of the playlist).  This is to fill gaps between content,
           NOT to fill gaps within content (the latter is done in ::video())
        */
        DCPTime fill_towards = earliest ? earliest_content : _playlist->length();

        /* Work out where to fill video from */
        optional<DCPTime> video_fill_from;
        if (_last_video_time && !_playlist->video_content_at(*_last_video_time)) {
                /* No seek; fill from the last video time */
                video_fill_from = _last_video_time;
        } else if (_last_seek_time && !_playlist->video_content_at(*_last_seek_time)) {
                /* Seek into an empty area; fill from the seek time */
                video_fill_from = _last_seek_time;
        }

        bool filled = false;
        /* Fill some black if we would emit before the earliest piece of content.  This is so we act like a phantom
           Piece which emits black in spaces (we only emit if we are the earliest thing)
        */
        if (earliest && video_fill_from && *video_fill_from < earliest_content && ((fill_towards - *video_fill_from)) > one_video_frame()) {
                emit_video (black_player_video_frame(), *video_fill_from);
                filled = true;
        } else if (_playlist->length() == DCPTime()) {
                /* Special case of an empty Film; just give one black frame */
                emit_video (black_player_video_frame(), DCPTime());
                filled = true;
        }

        /* Similarly for audio: find where to fill silence from */
        optional<DCPTime> audio_fill_from;
        if (_last_audio_time && !_playlist->audio_content_at(*_last_audio_time)) {
                /* No seek; fill from the last thing that happened */
                audio_fill_from = _last_audio_time;
        } else if (_last_seek_time && !_playlist->audio_content_at(*_last_seek_time)) {
                /* Seek into an empty area; fill from the seek time */
                audio_fill_from = _last_seek_time;
        }

        if (audio_fill_from && audio_fill_from < fill_towards) {
                DCPTimePeriod period (*audio_fill_from, fill_towards);
                /* Cap the fill at one video frame's worth of silence per pass */
                if (period.duration() > one_video_frame()) {
                        period.to = period.from + one_video_frame();
                }
                fill_audio (period);
                filled = true;
        }

        /* Nothing decoded and nothing filled: we are done */
        if (!earliest && !filled) {
                return true;
        }

        /* Emit any audio that is ready */

        /* We can safely pull merged audio up to the earliest point that any
           unfinished stream has pushed to.
        */
        DCPTime pull_from = _playlist->length ();
        for (map<AudioStreamPtr, StreamState>::const_iterator i = _stream_states.begin(); i != _stream_states.end(); ++i) {
                if (!i->second.piece->done && i->second.last_push_end < pull_from) {
                        pull_from = i->second.last_push_end;
                }
        }

        list<pair<shared_ptr<AudioBuffers>, DCPTime> > audio = _audio_merger.pull (pull_from);
        for (list<pair<shared_ptr<AudioBuffers>, DCPTime> >::iterator i = audio.begin(); i != audio.end(); ++i) {
                if (_last_audio_time && i->second < *_last_audio_time) {
                        /* There has been an accurate seek and we have received some audio before the seek time;
                           discard it.
                        */
                        pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (i->first, i->second, *_last_audio_time);
                        if (!cut.first) {
                                continue;
                        }
                        *i = cut;
                }

                /* Fill any gap between the last audio we emitted and this block */
                if (_last_audio_time) {
                        fill_audio (DCPTimePeriod (*_last_audio_time, i->second));
                }

                emit_audio (i->first, i->second);
        }

        return false;
}
622
623 optional<PositionImage>
624 Player::subtitles_for_frame (DCPTime time) const
625 {
626         list<PositionImage> subtitles;
627
628         BOOST_FOREACH (PlayerSubtitles i, _active_subtitles.get_burnt (time, _always_burn_subtitles)) {
629
630                 /* Image subtitles */
631                 list<PositionImage> c = transform_image_subtitles (i.image);
632                 copy (c.begin(), c.end(), back_inserter (subtitles));
633
634                 /* Text subtitles (rendered to an image) */
635                 if (!i.text.empty ()) {
636                         list<PositionImage> s = render_subtitles (i.text, i.fonts, _video_container_size, time);
637                         copy (s.begin(), s.end(), back_inserter (subtitles));
638                 }
639         }
640
641         if (subtitles.empty ()) {
642                 return optional<PositionImage> ();
643         }
644
645         return merge (subtitles);
646 }
647
/** Handler for video data emitted by a piece's decoder: convert it to a
 *  PlayerVideo at the right DCP time, filling any gap since the last video
 *  we emitted.
 *  @param wp Piece that emitted the video.
 *  @param video The video data.
 */
void
Player::video (weak_ptr<Piece> wp, ContentVideo video)
{
        shared_ptr<Piece> piece = wp.lock ();
        if (!piece) {
                return;
        }

        /* When skipping (content rate ~2x the DCP rate) drop every other frame */
        FrameRateChange frc(piece->content->active_video_frame_rate(), _film->video_frame_rate());
        if (frc.skip && (video.frame % 2) == 1) {
                return;
        }

        /* Time and period of the frame we will emit */
        DCPTime const time = content_video_to_dcp (piece, video.frame);
        DCPTimePeriod const period (time, time + one_video_frame());

        /* Discard if it's outside the content's period or if it's before the last accurate seek */
        if (
                time < piece->content->position() ||
                time >= piece->content->end() ||
                (_last_seek_time && _last_seek_accurate && time < *_last_seek_time)) {
                return;
        }

        /* Fill gaps caused by (the hopefully rare event of) a decoder not emitting contiguous video.  We have to do this here
           as in the problematic case we are about to emit a frame which is not contiguous with the previous.
        */

        if (_last_video_time) {
                /* XXX: this may not work for 3D */
                /* Fill from the end of the last video to now, skipping periods
                   covered by referenced DCPs (_no_video); repeat the last frame
                   if we have one, otherwise use black.
                */
                BOOST_FOREACH (DCPTimePeriod i, subtract(DCPTimePeriod (*_last_video_time, time), _no_video)) {
                        for (DCPTime j = i.from; j < i.to; j += one_video_frame()) {
                                if (_last_video) {
                                        emit_video (shared_ptr<PlayerVideo> (new PlayerVideo (*_last_video)), j);
                                } else {
                                        emit_video (black_player_video_frame(), j);
                                }
                        }
                }
        }

        /* Build the PlayerVideo for this frame, applying crop, fade, scale and
           colour conversion settings from the content.
        */
        _last_video.reset (
                new PlayerVideo (
                        video.image,
                        piece->content->video->crop (),
                        piece->content->video->fade (video.frame),
                        piece->content->video->scale().size (
                                piece->content->video, _video_container_size, _film->frame_size ()
                                ),
                        _video_container_size,
                        video.eyes,
                        video.part,
                        piece->content->video->colour_conversion ()
                        )
                );

        emit_video (_last_video, time);
}
707
/** Flush any audio still held in a stream's resampler and pass it through our
 *  common audio processing.  Called when a piece finishes decoding.
 *  @param piece Piece whose audio is to be flushed.
 *  @param stream Stream to flush.
 */
void
Player::audio_flush (shared_ptr<Piece> piece, AudioStreamPtr stream)
{
        shared_ptr<AudioContent> content = piece->content->audio;
        DCPOMATIC_ASSERT (content);

        /* Don't create a resampler if one doesn't already exist for this stream */
        shared_ptr<Resampler> r = resampler (content, stream, false);
        if (!r) {
                return;
        }

        pair<shared_ptr<const AudioBuffers>, Frame> ro = r->flush ();
        if (ro.first->frames() == 0) {
                return;
        }

        ContentAudio content_audio;
        content_audio.audio = ro.first;
        content_audio.frame = ro.second;

        /* Compute time in the DCP */
        DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);

        audio_transform (content, stream, content_audio, time);
}
733
/** Do our common processing on some audio: apply gain, remap the channels to
 *  the DCP layout, run any audio processor, and push the result into the
 *  merger, recording how far this stream has now pushed.
 *  @param content Audio content the data came from (for gain).
 *  @param stream Stream the data came from (for mapping and push state).
 *  @param content_audio The audio data.
 *  @param time DCP time at which the data starts.
 */
void
Player::audio_transform (shared_ptr<AudioContent> content, AudioStreamPtr stream, ContentAudio content_audio, DCPTime time)
{
        DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

        /* Gain */

        if (content->gain() != 0) {
                /* Copy so we don't modify the decoder's buffers */
                shared_ptr<AudioBuffers> gain (new AudioBuffers (content_audio.audio));
                gain->apply_gain (content->gain ());
                content_audio.audio = gain;
        }

        /* Remap */

        shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), content_audio.audio->frames()));
        dcp_mapped->make_silent ();

        /* Accumulate each input channel into each DCP channel it maps to,
           weighted by the mapping gain.
        */
        AudioMapping map = stream->mapping ();
        for (int i = 0; i < map.input_channels(); ++i) {
                for (int j = 0; j < dcp_mapped->channels(); ++j) {
                        if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
                                dcp_mapped->accumulate_channel (
                                        content_audio.audio.get(),
                                        i,
                                        static_cast<dcp::Channel> (j),
                                        map.get (i, static_cast<dcp::Channel> (j))
                                        );
                        }
                }
        }

        content_audio.audio = dcp_mapped;

        /* Process */

        if (_audio_processor) {
                content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
        }

        /* Push */

        _audio_merger.push (content_audio.audio, time);
        DCPOMATIC_ASSERT (_stream_states.find (stream) != _stream_states.end ());
        _stream_states[stream].last_push_end = time + DCPTime::from_frames (content_audio.audio->frames(), _film->audio_frame_rate());
}
781
/** Handle some audio data arriving from a decoder.
 *  @param wp Piece that the audio came from.
 *  @param stream Stream within the piece's content.
 *  @param content_audio Audio data (must have frames() > 0) and the frame at which it starts.
 */
void
Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_audio)
{
	DCPOMATIC_ASSERT (content_audio.audio->frames() > 0);

	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		/* The piece has gone; nothing to do */
		return;
	}

	shared_ptr<AudioContent> content = piece->content->audio;
	DCPOMATIC_ASSERT (content);

	/* Resample */
	if (stream->frame_rate() != content->resampled_frame_rate()) {
		shared_ptr<Resampler> r = resampler (content, stream, true);
		pair<shared_ptr<const AudioBuffers>, Frame> ro = r->run (content_audio.audio, content_audio.frame);
		if (ro.first->frames() == 0) {
			/* The resampler needs more data before it can give us any output */
			return;
		}
		content_audio.audio = ro.first;
		content_audio.frame = ro.second;
	}

	/* Compute time in the DCP */
	DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
	/* And the end of this block in the DCP */
	DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());

	/* Remove anything that comes before the start or after the end of the content */
	if (time < piece->content->position()) {
		/* The block starts before the content; cut off the part before position() */
		pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
		if (!cut.first) {
			/* This audio is entirely discarded */
			return;
		}
		content_audio.audio = cut.first;
		time = cut.second;
	} else if (time > piece->content->end()) {
		/* Discard it all */
		return;
	} else if (end > piece->content->end()) {
		/* The block runs past the end of the content; truncate it */
		Frame const remaining_frames = DCPTime(piece->content->end() - time).frames_round(_film->audio_frame_rate());
		if (remaining_frames == 0) {
			return;
		}
		shared_ptr<AudioBuffers> cut (new AudioBuffers (content_audio.audio->channels(), remaining_frames));
		cut->copy_from (content_audio.audio.get(), remaining_frames, 0, 0);
		content_audio.audio = cut;
	}

	audio_transform (content, stream, content_audio, time);
}
835
836 void
837 Player::image_subtitle_start (weak_ptr<Piece> wp, ContentImageSubtitle subtitle)
838 {
839         shared_ptr<Piece> piece = wp.lock ();
840         if (!piece) {
841                 return;
842         }
843
844         /* Apply content's subtitle offsets */
845         subtitle.sub.rectangle.x += piece->content->subtitle->x_offset ();
846         subtitle.sub.rectangle.y += piece->content->subtitle->y_offset ();
847
848         /* Apply content's subtitle scale */
849         subtitle.sub.rectangle.width *= piece->content->subtitle->x_scale ();
850         subtitle.sub.rectangle.height *= piece->content->subtitle->y_scale ();
851
852         /* Apply a corrective translation to keep the subtitle centred after that scale */
853         subtitle.sub.rectangle.x -= subtitle.sub.rectangle.width * (piece->content->subtitle->x_scale() - 1);
854         subtitle.sub.rectangle.y -= subtitle.sub.rectangle.height * (piece->content->subtitle->y_scale() - 1);
855
856         PlayerSubtitles ps;
857         ps.image.push_back (subtitle.sub);
858         DCPTime from (content_time_to_dcp (piece, subtitle.from()));
859
860         _active_subtitles.add_from (wp, ps, from);
861 }
862
/** Handle the start of a text subtitle from a decoder, applying the content's
 *  offset, scale and outline settings before storing it as active.
 */
void
Player::text_subtitle_start (weak_ptr<Piece> wp, ContentTextSubtitle subtitle)
{
	shared_ptr<Piece> piece = wp.lock ();
	if (!piece) {
		return;
	}

	PlayerSubtitles ps;
	DCPTime const from (content_time_to_dcp (piece, subtitle.from()));

	/* s is deliberately taken by value here as we are going to modify it */
	BOOST_FOREACH (dcp::SubtitleString s, subtitle.subs) {
		/* Apply content's subtitle offsets */
		s.set_h_position (s.h_position() + piece->content->subtitle->x_offset ());
		s.set_v_position (s.v_position() + piece->content->subtitle->y_offset ());
		float const xs = piece->content->subtitle->x_scale();
		float const ys = piece->content->subtitle->y_scale();
		float size = s.size();

		/* Adjust size to express the common part of the scaling;
		   e.g. if xs = ys = 0.5 we scale size by 2.
		*/
		if (xs > 1e-5 && ys > 1e-5) {
			size *= 1 / min (1 / xs, 1 / ys);
		}
		s.set_size (size);

		/* Then express aspect ratio changes */
		if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
			s.set_aspect_adjust (xs / ys);
		}

		/* Stamp the subtitle with its DCP start time */
		s.set_in (dcp::Time(from.seconds(), 1000));
		ps.text.push_back (SubtitleString (s, piece->content->subtitle->outline_width()));
		ps.add_fonts (piece->content->subtitle->fonts ());
	}

	_active_subtitles.add_from (wp, ps, from);
}
901
902 void
903 Player::subtitle_stop (weak_ptr<Piece> wp, ContentTime to)
904 {
905         if (!_active_subtitles.have (wp)) {
906                 return;
907         }
908
909         shared_ptr<Piece> piece = wp.lock ();
910         if (!piece) {
911                 return;
912         }
913
914         DCPTime const dcp_to = content_time_to_dcp (piece, to);
915
916         pair<PlayerSubtitles, DCPTime> from = _active_subtitles.add_to (wp, dcp_to);
917
918         if (piece->content->subtitle->use() && !_always_burn_subtitles && !piece->content->subtitle->burn()) {
919                 Subtitle (from.first, DCPTimePeriod (from.second, dcp_to));
920         }
921 }
922
923 void
924 Player::seek (DCPTime time, bool accurate)
925 {
926         if (_audio_processor) {
927                 _audio_processor->flush ();
928         }
929
930         for (ResamplerMap::iterator i = _resamplers.begin(); i != _resamplers.end(); ++i) {
931                 i->second->flush ();
932                 i->second->reset ();
933         }
934
935         _audio_merger.clear ();
936         _active_subtitles.clear ();
937
938         BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
939                 if (time < i->content->position()) {
940                         /* Before; seek to 0 */
941                         i->decoder->seek (ContentTime(), accurate);
942                         i->done = false;
943                 } else if (i->content->position() <= time && time < i->content->end()) {
944                         /* During; seek to position */
945                         i->decoder->seek (dcp_to_content_time (i, time), accurate);
946                         i->done = false;
947                 } else {
948                         /* After; this piece is done */
949                         i->done = true;
950                 }
951         }
952
953         _last_video_time = optional<DCPTime> ();
954         _last_audio_time = optional<DCPTime> ();
955         _last_seek_time = time;
956         _last_seek_accurate = accurate;
957 }
958
959 shared_ptr<Resampler>
960 Player::resampler (shared_ptr<const AudioContent> content, AudioStreamPtr stream, bool create)
961 {
962         ResamplerMap::const_iterator i = _resamplers.find (make_pair (content, stream));
963         if (i != _resamplers.end ()) {
964                 return i->second;
965         }
966
967         if (!create) {
968                 return shared_ptr<Resampler> ();
969         }
970
971         LOG_GENERAL (
972                 "Creating new resampler from %1 to %2 with %3 channels",
973                 stream->frame_rate(),
974                 content->resampled_frame_rate(),
975                 stream->channels()
976                 );
977
978         shared_ptr<Resampler> r (
979                 new Resampler (stream->frame_rate(), content->resampled_frame_rate(), stream->channels())
980                 );
981
982         _resamplers[make_pair(content, stream)] = r;
983         return r;
984 }
985
986 void
987 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
988 {
989         optional<PositionImage> subtitles = subtitles_for_frame (time);
990         if (subtitles) {
991                 pv->set_subtitle (subtitles.get ());
992         }
993         Video (pv, time);
994         _last_video_time = time + one_video_frame();
995         _active_subtitles.clear_before (time);
996 }
997
998 void
999 Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
1000 {
1001         Audio (data, time);
1002         _last_audio_time = time + DCPTime::from_frames (data->frames(), _film->audio_frame_rate ());
1003 }
1004
1005 void
1006 Player::fill_audio (DCPTimePeriod period)
1007 {
1008         BOOST_FOREACH (DCPTimePeriod i, subtract(period, _no_audio)) {
1009                 DCPTime t = i.from;
1010                 while (t < i.to) {
1011                         DCPTime block = min (DCPTime::from_seconds (0.5), i.to - t);
1012                         Frame const samples = block.frames_round(_film->audio_frame_rate());
1013                         if (samples) {
1014                                 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), samples));
1015                                 silence->make_silent ();
1016                                 emit_audio (silence, t);
1017                         }
1018                         t += block;
1019                 }
1020         }
1021 }
1022
/** @return The length of one video frame at the film's video frame rate */
DCPTime
Player::one_video_frame () const
{
	return DCPTime::from_frames (1, _film->video_frame_rate ());
}
1028
1029 pair<shared_ptr<AudioBuffers>, DCPTime>
1030 Player::discard_audio (shared_ptr<const AudioBuffers> audio, DCPTime time, DCPTime discard_to) const
1031 {
1032         DCPTime const discard_time = discard_to - time;
1033         Frame const discard_frames = discard_time.frames_round(_film->audio_frame_rate());
1034         Frame remaining_frames = audio->frames() - discard_frames;
1035         if (remaining_frames <= 0) {
1036                 return make_pair(shared_ptr<AudioBuffers>(), DCPTime());
1037         }
1038         shared_ptr<AudioBuffers> cut (new AudioBuffers (audio->channels(), remaining_frames));
1039         cut->copy_from (audio.get(), remaining_frames, discard_frames, 0);
1040         return make_pair(cut, time + discard_time);
1041 }