2 Copyright (C) 2013-2016 Carl Hetherington <cth@carlh.net>
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; either version 2 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 #include "ffmpeg_decoder.h"
23 #include "video_decoder.h"
24 #include "audio_decoder.h"
25 #include "audio_buffers.h"
26 #include "audio_content.h"
27 #include "ffmpeg_content.h"
28 #include "image_decoder.h"
29 #include "content_audio.h"
30 #include "image_content.h"
31 #include "subtitle_content.h"
32 #include "text_subtitle_decoder.h"
33 #include "text_subtitle_content.h"
34 #include "dcp_content.h"
37 #include "raw_image_proxy.h"
40 #include "render_subtitles.h"
42 #include "content_video.h"
43 #include "player_video.h"
44 #include "frame_rate_change.h"
45 #include "dcp_content.h"
46 #include "dcp_decoder.h"
47 #include "dcp_subtitle_content.h"
48 #include "dcp_subtitle_decoder.h"
49 #include "audio_processor.h"
51 #include "referenced_reel_asset.h"
53 #include <dcp/reel_sound_asset.h>
54 #include <dcp/reel_subtitle_asset.h>
55 #include <dcp/reel_picture_asset.h>
56 #include <boost/foreach.hpp>
/* Convenience macro: write a general-category entry to the film's log. */
63 #define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), LogEntry::TYPE_GENERAL);
/* Smart pointers come from Boost here, not std — this file pre-dates a std:: migration. */
75 using boost::shared_ptr;
76 using boost::weak_ptr;
77 using boost::dynamic_pointer_cast;
78 using boost::optional;
79 using boost::scoped_ptr;
/* Predicate used with Player::overlaps(): true if this Content has a video part. */
82 has_video (Content* c)
84 return static_cast<bool>(c->video);
/* Predicate used with Player::overlaps(): true if this Content has an audio part. */
88 has_audio (Content* c)
90 return static_cast<bool>(c->audio);
/* Predicate used with Player::overlaps(): true if this Content has a subtitle part. */
94 has_subtitle (Content* c)
96 return static_cast<bool>(c->subtitle);
/* Construct a Player for the given film/playlist.  Wires up change signals so the
 * piece list can be rebuilt lazily, and initialises the video container size and
 * audio processor from the film's current state.
 * NOTE(review): this excerpt appears to be missing lines (e.g. the `_film (film)`
 * initialiser and braces) — confirm against the full file.
 */
99 Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist)
101 , _playlist (playlist)
102 , _have_valid_pieces (false)
103 , _ignore_video (false)
104 , _ignore_audio (false)
105 , _always_burn_subtitles (false)
107 , _play_referenced (false)
/* Rebuild/adjust state when the film, playlist or playlist content changes. */
109 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
110 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
111 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::playlist_content_changed, this, _1, _2, _3));
112 set_video_container_size (_film->frame_size ());
/* Force creation of the audio processor (if any) for the film's current setting. */
114 film_changed (Film::AUDIO_PROCESSOR);
/* Rebuild _pieces from the playlist: one Piece per content item, pairing the
 * content with a freshly-created decoder and the FrameRateChange from the
 * content's rate to the DCP rate.  Content with invalid paths is skipped.
 * NOTE(review): several interior lines (branch conditions, braces) are missing
 * from this excerpt — confirm control flow against the full file.
 */
118 Player::setup_pieces ()
/* Keep the old pieces around so decoders can be re-used where possible. */
120 list<shared_ptr<Piece> > old_pieces = _pieces;
123 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
125 if (!i->paths_valid ()) {
129 shared_ptr<Decoder> decoder;
130 optional<FrameRateChange> frc;
/* FFmpeg-based content. */
133 shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (i);
135 decoder.reset (new FFmpegDecoder (fc, _film->log(), _fast));
136 frc = FrameRateChange (fc->active_video_frame_rate(), _film->video_frame_rate());
/* DCP content. */
139 shared_ptr<const DCPContent> dc = dynamic_pointer_cast<const DCPContent> (i);
141 decoder.reset (new DCPDecoder (dc, _film->log(), _fast));
142 frc = FrameRateChange (dc->active_video_frame_rate(), _film->video_frame_rate());
/* Still-image content. */
146 shared_ptr<const ImageContent> ic = dynamic_pointer_cast<const ImageContent> (i);
148 /* See if we can re-use an old ImageDecoder */
149 for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
150 shared_ptr<ImageDecoder> imd = dynamic_pointer_cast<ImageDecoder> ((*j)->decoder);
151 if (imd && imd->content() == ic) {
/* No re-usable decoder found; make a new one. */
157 decoder.reset (new ImageDecoder (ic, _film->log()));
160 frc = FrameRateChange (ic->active_video_frame_rate(), _film->video_frame_rate());
163 /* It's questionable whether subtitle content should have a video frame rate; perhaps
164 it should be assumed that any subtitle content has been prepared at the same rate
165 as simultaneous video content (like we do with audio).
168 /* TextSubtitleContent */
169 shared_ptr<const TextSubtitleContent> rc = dynamic_pointer_cast<const TextSubtitleContent> (i);
171 decoder.reset (new TextSubtitleDecoder (rc));
172 frc = FrameRateChange (rc->active_video_frame_rate(), _film->video_frame_rate());
175 /* DCPSubtitleContent */
176 shared_ptr<const DCPSubtitleContent> dsc = dynamic_pointer_cast<const DCPSubtitleContent> (i);
178 decoder.reset (new DCPSubtitleDecoder (dsc));
179 frc = FrameRateChange (dsc->active_video_frame_rate(), _film->video_frame_rate());
/* Honour the player-wide ignore flags by silencing the new decoder's parts. */
182 if (decoder->video && _ignore_video) {
183 decoder->video->set_ignore ();
186 if (decoder->audio && _ignore_audio) {
187 decoder->audio->set_ignore ();
190 _pieces.push_back (shared_ptr<Piece> (new Piece (i, decoder, frc.get ())));
193 _have_valid_pieces = true;
/* Handler for the playlist's ContentChanged signal.
 * @param w weak pointer to the changed content (may have expired).
 * @param property which property changed (ContentProperty / VideoContentProperty / ...).
 * @param frequent presumably true for rapid-fire changes (e.g. dragging) — TODO confirm from signal emitter.
 * Properties in the first group invalidate the piece list; the second group
 * presumably only requires notifying observers of changed output — confirm, as the
 * statements following each group are missing from this excerpt.
 */
197 Player::playlist_content_changed (weak_ptr<Content> w, int property, bool frequent)
199 shared_ptr<Content> c = w.lock ();
/* Changes that affect timing, decoding or piece construction: pieces must be rebuilt. */
205 property == ContentProperty::POSITION ||
206 property == ContentProperty::LENGTH ||
207 property == ContentProperty::TRIM_START ||
208 property == ContentProperty::TRIM_END ||
209 property == ContentProperty::PATH ||
210 property == VideoContentProperty::FRAME_TYPE ||
211 property == DCPContentProperty::CAN_BE_PLAYED ||
212 property == SubtitleContentProperty::COLOUR ||
213 property == SubtitleContentProperty::OUTLINE ||
214 property == SubtitleContentProperty::OUTLINE_COLOUR ||
215 property == FFmpegContentProperty::SUBTITLE_STREAM
218 _have_valid_pieces = false;
/* Changes that alter rendering only; pieces themselves stay valid. */
222 property == ContentProperty::VIDEO_FRAME_RATE ||
223 property == SubtitleContentProperty::USE ||
224 property == SubtitleContentProperty::X_OFFSET ||
225 property == SubtitleContentProperty::Y_OFFSET ||
226 property == SubtitleContentProperty::X_SCALE ||
227 property == SubtitleContentProperty::Y_SCALE ||
228 property == SubtitleContentProperty::FONTS ||
229 property == VideoContentProperty::CROP ||
230 property == VideoContentProperty::SCALE ||
231 property == VideoContentProperty::FADE_IN ||
232 property == VideoContentProperty::FADE_OUT ||
233 property == VideoContentProperty::COLOUR_CONVERSION
/* Set the size of the "container" into which video is placed, and rebuild the
 * cached all-black frame at that size (used when no video content is present).
 */
241 Player::set_video_container_size (dcp::Size s)
243 _video_container_size = s;
245 _black_image.reset (new Image (AV_PIX_FMT_RGB24, _video_container_size, true));
246 _black_image->make_black ();
/* Playlist structure changed: invalidate the piece list so it is rebuilt lazily. */
250 Player::playlist_changed ()
252 _have_valid_pieces = false;
/* Handler for the film's Changed signal.
 * @param p the Film property that changed.
 */
257 Player::film_changed (Film::Property p)
259 /* Here we should notice Film properties that affect our output, and
260 alert listeners that our output now would be different to how it was
261 last time we were run.
264 if (p == Film::CONTAINER) {
266 } else if (p == Film::VIDEO_FRAME_RATE) {
267 /* Pieces contain a FrameRateChange which contains the DCP frame rate,
268 so we need new pieces here.
270 _have_valid_pieces = false;
272 } else if (p == Film::AUDIO_PROCESSOR) {
273 if (_film->audio_processor ()) {
/* Clone the film's processor so this Player owns its own instance at the film's sample rate. */
274 _audio_processor = _film->audio_processor()->clone (_film->audio_frame_rate ());
/* Convert a list of image subtitles into PositionImages scaled and positioned
 * for _video_container_size.  Rectangle coordinates are evidently fractional
 * (multiplied by the container dimensions to get pixels).
 */
280 Player::transform_image_subtitles (list<ImageSubtitle> subs) const
282 list<PositionImage> all;
284 for (list<ImageSubtitle>::const_iterator i = subs.begin(); i != subs.end(); ++i) {
289 /* We will scale the subtitle up to fit _video_container_size */
290 dcp::Size scaled_size (i->rectangle.width * _video_container_size.width, i->rectangle.height * _video_container_size.height);
292 /* Then we need a corrective translation, consisting of two parts:
294 * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
295 * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
297 * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
298 * (width_before_subtitle_scale * (1 - subtitle_x_scale) / 2) and
299 * (height_before_subtitle_scale * (1 - subtitle_y_scale) / 2).
301 * Combining these two translations gives these expressions.
/* Arguments to the (partially elided) scale/position call below. */
308 dcp::YUV_TO_RGB_REC601,
309 i->image->pixel_format (),
314 lrint (_video_container_size.width * i->rectangle.x),
315 lrint (_video_container_size.height * i->rectangle.y)
/* @return a PlayerVideo wrapping the cached all-black image, used where no video
 * content exists at the requested time.  Uses the first preset colour conversion.
 */
324 shared_ptr<PlayerVideo>
325 Player::black_player_video_frame (DCPTime time) const
327 return shared_ptr<PlayerVideo> (
329 shared_ptr<const ImageProxy> (new RawImageProxy (_black_image)),
333 _video_container_size,
334 _video_container_size,
337 PresetColourConversion::all().front().conversion
342 /** @return All PlayerVideos at the given time. There may be none if the content
343 * at `time' is a DCP which we are passing through (i.e. referring to by reference)
344 * or 2 if we have 3D.
/* NOTE(review): interior lines are missing from this excerpt (e.g. the setup_pieces()
 * call after the _have_valid_pieces check and several closing braces) — confirm
 * against the full file.
 */
346 list<shared_ptr<PlayerVideo> >
347 Player::get_video (DCPTime time, bool accurate)
349 if (!_have_valid_pieces) {
353 /* Find subtitles for possible burn-in */
355 PlayerSubtitles ps = get_subtitles (time, DCPTime::from_frames (1, _film->video_frame_rate ()), false, true, accurate);
357 list<PositionImage> sub_images;
359 /* Image subtitles */
360 list<PositionImage> c = transform_image_subtitles (ps.image);
361 copy (c.begin(), c.end(), back_inserter (sub_images));
363 /* Text subtitles (rendered to an image) */
364 if (!ps.text.empty ()) {
365 list<PositionImage> s = render_subtitles (ps.text, ps.fonts, _video_container_size);
366 copy (s.begin (), s.end (), back_inserter (sub_images));
/* Merge all subtitle images into one, if there are any. */
369 optional<PositionImage> subtitles;
370 if (!sub_images.empty ()) {
371 subtitles = merge (sub_images);
374 /* Find pieces containing video which is happening now */
376 list<shared_ptr<Piece> > ov = overlaps (
378 time + DCPTime::from_frames (1, _film->video_frame_rate ()),
382 list<shared_ptr<PlayerVideo> > pvf;
385 /* No video content at this time */
386 pvf.push_back (black_player_video_frame (time));
388 /* Some video content at this time */
389 shared_ptr<Piece> last = *(ov.rbegin ());
390 VideoFrameType const last_type = last->content->video->frame_type ();
392 /* Get video from appropriate piece(s) */
393 BOOST_FOREACH (shared_ptr<Piece> piece, ov) {
395 shared_ptr<VideoDecoder> decoder = piece->decoder->video;
396 DCPOMATIC_ASSERT (decoder);
/* Referenced DCP video is passed through elsewhere; skip decoding it here. */
398 shared_ptr<DCPContent> dcp_content = dynamic_pointer_cast<DCPContent> (piece->content);
399 if (dcp_content && dcp_content->reference_video () && !_play_referenced) {
404 /* always use the last video */
406 /* with a corresponding L/R eye if appropriate */
407 (last_type == VIDEO_FRAME_TYPE_3D_LEFT && piece->content->video->frame_type() == VIDEO_FRAME_TYPE_3D_RIGHT) ||
408 (last_type == VIDEO_FRAME_TYPE_3D_RIGHT && piece->content->video->frame_type() == VIDEO_FRAME_TYPE_3D_LEFT);
411 /* We want to use this piece */
412 list<ContentVideo> content_video = decoder->get (dcp_to_content_video (piece, time), accurate);
413 if (content_video.empty ()) {
414 pvf.push_back (black_player_video_frame (time));
416 dcp::Size image_size = piece->content->video->scale().size (
417 piece->content->video, _video_container_size, _film->frame_size ()
420 for (list<ContentVideo>::const_iterator i = content_video.begin(); i != content_video.end(); ++i) {
422 shared_ptr<PlayerVideo> (
425 content_video_to_dcp (piece, i->frame),
426 piece->content->video->crop (),
427 piece->content->video->fade (i->frame),
429 _video_container_size,
432 piece->content->video->colour_conversion ()
439 /* Discard unused video */
440 decoder->get (dcp_to_content_video (piece, time), accurate);
/* Attach the merged subtitle image (if any) to every produced frame. */
446 BOOST_FOREACH (shared_ptr<PlayerVideo> p, pvf) {
447 p->set_subtitle (subtitles.get ());
454 /** @return Audio data or 0 if the only audio data here is referenced DCP data */
/* NOTE(review): interior lines (branch bodies, closing braces, the `offset`
 * declaration used near the end) are missing from this excerpt — confirm
 * against the full file.
 */
455 shared_ptr<AudioBuffers>
456 Player::get_audio (DCPTime time, DCPTime length, bool accurate)
458 if (!_have_valid_pieces) {
/* Start from a silent buffer of the requested length and mix content into it. */
462 Frame const length_frames = length.frames_round (_film->audio_frame_rate ());
464 shared_ptr<AudioBuffers> audio (new AudioBuffers (_film->audio_channels(), length_frames));
465 audio->make_silent ();
467 list<shared_ptr<Piece> > ov = overlaps (time, time + length, has_audio);
/* If every overlapping piece is referenced DCP audio (and we are not playing
 * referenced content), there is nothing to render: return a null pointer.
 */
472 bool all_referenced = true;
473 BOOST_FOREACH (shared_ptr<Piece> i, ov) {
474 shared_ptr<DCPContent> dcp_content = dynamic_pointer_cast<DCPContent> (i->content);
475 if (i->content->audio && (!dcp_content || !dcp_content->reference_audio ())) {
476 /* There is audio content which is not from a DCP or not set to be referenced */
477 all_referenced = false;
481 if (all_referenced && !_play_referenced) {
482 return shared_ptr<AudioBuffers> ();
485 BOOST_FOREACH (shared_ptr<Piece> i, ov) {
487 DCPOMATIC_ASSERT (i->content->audio);
488 shared_ptr<AudioDecoder> decoder = i->decoder->audio;
489 DCPOMATIC_ASSERT (decoder);
491 /* The time that we should request from the content */
492 DCPTime request = time - DCPTime::from_seconds (i->content->audio->delay() / 1000.0);
493 Frame request_frames = length_frames;
495 if (request < DCPTime ()) {
496 /* We went off the start of the content, so we will need to offset
497 the stuff we get back.
500 request_frames += request.frames_round (_film->audio_frame_rate ());
501 if (request_frames < 0) {
504 request = DCPTime ();
507 Frame const content_frame = dcp_to_resampled_audio (i, request);
509 BOOST_FOREACH (AudioStreamPtr j, i->content->audio->streams ()) {
511 if (j->channels() == 0) {
512 /* Some content (e.g. DCPs) can have streams with no channels */
516 /* Audio from this piece's decoder stream (which might be more or less than what we asked for) */
517 ContentAudio all = decoder->get (j, content_frame, request_frames, accurate);
/* Apply per-content gain (dB via apply_gain) on a copy of the decoded audio. */
520 if (i->content->audio->gain() != 0) {
521 shared_ptr<AudioBuffers> gain (new AudioBuffers (all.audio));
522 gain->apply_gain (i->content->audio->gain ());
/* Remap stream channels into the film's DCP channel layout. */
527 shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), all.audio->frames()));
528 dcp_mapped->make_silent ();
529 AudioMapping map = j->mapping ();
530 for (int i = 0; i < map.input_channels(); ++i) {
531 for (int j = 0; j < _film->audio_channels(); ++j) {
532 if (map.get (i, j) > 0) {
533 dcp_mapped->accumulate_channel (
/* Optionally run the film's audio processor over the mapped audio. */
543 if (_audio_processor) {
544 dcp_mapped = _audio_processor->run (dcp_mapped, _film->audio_channels ());
547 all.audio = dcp_mapped;
/* Mix this stream's frames into the output buffer at the right offset. */
549 audio->accumulate_frames (
551 content_frame - all.frame,
552 offset.frames_round (_film->audio_frame_rate()),
553 min (Frame (all.audio->frames()), request_frames)
/* Convert a DCP time to a frame index in the given piece's video content,
 * clamping to the piece's trimmed length and accounting for trim-start.
 */
562 Player::dcp_to_content_video (shared_ptr<const Piece> piece, DCPTime t) const
564 DCPTime s = t - piece->content->position ();
565 s = min (piece->content->length_after_trim(), s);
566 s = max (DCPTime(), s + DCPTime (piece->content->trim_start(), piece->frc));
568 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
569 then convert that ContentTime to frames at the content's rate. However this fails for
570 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
571 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
573 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
575 return s.frames_floor (piece->frc.dcp) / piece->frc.factor ();
/* Inverse of dcp_to_content_video: convert a content video frame index back to
 * a DCP time, never returning a time before zero.
 */
579 Player::content_video_to_dcp (shared_ptr<const Piece> piece, Frame f) const
581 /* See comment in dcp_to_content_video */
582 DCPTime const d = DCPTime::from_frames (f * piece->frc.factor(), piece->frc.dcp) - DCPTime (piece->content->trim_start (), piece->frc);
583 return max (DCPTime (), d + piece->content->position ());
/* Convert a DCP time to a frame index in the piece's audio, at the film's
 * (resampled) audio frame rate, clamped to the piece's trimmed length.
 */
587 Player::dcp_to_resampled_audio (shared_ptr<const Piece> piece, DCPTime t) const
589 DCPTime s = t - piece->content->position ();
590 s = min (piece->content->length_after_trim(), s);
591 /* See notes in dcp_to_content_video */
592 return max (DCPTime (), DCPTime (piece->content->trim_start (), piece->frc) + s).frames_floor (_film->audio_frame_rate ());
/* Convert a DCP time to a ContentTime within the piece's subtitle content,
 * applying the frame-rate change and trim-start; never before zero.
 */
596 Player::dcp_to_content_subtitle (shared_ptr<const Piece> piece, DCPTime t) const
598 DCPTime s = t - piece->content->position ();
599 s = min (piece->content->length_after_trim(), s);
600 return max (ContentTime (), ContentTime (s, piece->frc) + piece->content->trim_start());
/* Inverse of dcp_to_content_subtitle: map a subtitle ContentTime back to DCP time. */
604 Player::content_subtitle_to_dcp (shared_ptr<const Piece> piece, ContentTime t) const
606 return max (DCPTime (), DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position());
609 /** @param burnt true to return only subtitles to be burnt, false to return only
610 * subtitles that should not be burnt. This parameter will be ignored if
611 * _always_burn_subtitles is true; in this case, all subtitles will be returned.
/* Collect image and text subtitles overlapping [time, time + length), applying
 * each content's offset/scale settings.  Referenced DCP subtitles are skipped
 * unless _play_referenced is set.
 * NOTE(review): some interior lines (loop/branch closings, `continue`s) are
 * missing from this excerpt — confirm against the full file.
 */
614 Player::get_subtitles (DCPTime time, DCPTime length, bool starting, bool burnt, bool accurate)
616 list<shared_ptr<Piece> > subs = overlaps (time, time + length, has_subtitle);
618 PlayerSubtitles ps (time, length);
620 for (list<shared_ptr<Piece> >::const_iterator j = subs.begin(); j != subs.end(); ++j) {
/* Skip disabled subtitles, and those whose burn setting does not match `burnt'
 * (unless we always burn). */
621 if (!(*j)->content->subtitle->use () || (!_always_burn_subtitles && (burnt != (*j)->content->subtitle->burn ()))) {
625 shared_ptr<DCPContent> dcp_content = dynamic_pointer_cast<DCPContent> ((*j)->content);
626 if (dcp_content && dcp_content->reference_subtitle () && !_play_referenced) {
630 shared_ptr<SubtitleDecoder> subtitle_decoder = (*j)->decoder->subtitle;
631 ContentTime const from = dcp_to_content_subtitle (*j, time);
632 /* XXX: this video_frame_rate() should be the rate that the subtitle content has been prepared for */
633 ContentTime const to = from + ContentTime::from_frames (1, _film->video_frame_rate ());
/* Image subtitles: adjust rectangles in-place, then collect. */
635 list<ContentImageSubtitle> image = subtitle_decoder->get_image (ContentTimePeriod (from, to), starting, accurate);
636 for (list<ContentImageSubtitle>::iterator i = image.begin(); i != image.end(); ++i) {
638 /* Apply content's subtitle offsets */
639 i->sub.rectangle.x += (*j)->content->subtitle->x_offset ();
640 i->sub.rectangle.y += (*j)->content->subtitle->y_offset ();
642 /* Apply content's subtitle scale */
643 i->sub.rectangle.width *= (*j)->content->subtitle->x_scale ();
644 i->sub.rectangle.height *= (*j)->content->subtitle->y_scale ();
646 /* Apply a corrective translation to keep the subtitle centred after that scale */
647 i->sub.rectangle.x -= i->sub.rectangle.width * ((*j)->content->subtitle->x_scale() - 1);
648 i->sub.rectangle.y -= i->sub.rectangle.height * ((*j)->content->subtitle->y_scale() - 1);
650 ps.image.push_back (i->sub);
/* Text subtitles: adjust position/size/aspect per content settings, set the
 * in/out times in DCP time, then collect along with the content's fonts. */
653 list<ContentTextSubtitle> text = subtitle_decoder->get_text (ContentTimePeriod (from, to), starting, accurate);
654 BOOST_FOREACH (ContentTextSubtitle& ts, text) {
655 BOOST_FOREACH (dcp::SubtitleString s, ts.subs) {
656 s.set_h_position (s.h_position() + (*j)->content->subtitle->x_offset ());
657 s.set_v_position (s.v_position() + (*j)->content->subtitle->y_offset ());
658 float const xs = (*j)->content->subtitle->x_scale();
659 float const ys = (*j)->content->subtitle->y_scale();
660 float size = s.size();
662 /* Adjust size to express the common part of the scaling;
663 e.g. if xs = ys = 0.5 we scale size by 2.
665 if (xs > 1e-5 && ys > 1e-5) {
666 size *= 1 / min (1 / xs, 1 / ys);
670 /* Then express aspect ratio changes */
671 if (fabs (1.0 - xs / ys) > dcp::ASPECT_ADJUST_EPSILON) {
672 s.set_aspect_adjust (xs / ys);
674 s.set_in (dcp::Time(content_subtitle_to_dcp (*j, ts.period().from).seconds(), 1000));
675 s.set_out (dcp::Time(content_subtitle_to_dcp (*j, ts.period().to).seconds(), 1000));
676 ps.text.push_back (s);
677 ps.add_fonts ((*j)->content->subtitle->fonts ());
/* @return the fonts of every subtitle-bearing piece, concatenated.
 * Duplicate font IDs with different files are a known hazard (see XXX below).
 */
685 list<shared_ptr<Font> >
686 Player::get_subtitle_fonts ()
688 if (!_have_valid_pieces) {
692 list<shared_ptr<Font> > fonts;
693 BOOST_FOREACH (shared_ptr<Piece>& p, _pieces) {
694 if (p->content->subtitle) {
695 /* XXX: things may go wrong if there are duplicate font IDs
696 with different font files.
698 list<shared_ptr<Font> > f = p->content->subtitle->fonts ();
699 copy (f.begin(), f.end(), back_inserter (fonts));
706 /** Set this player never to produce any video data */
708 Player::set_ignore_video ()
710 _ignore_video = true;
713 /** Set this player never to produce any audio data */
715 Player::set_ignore_audio ()
717 _ignore_audio = true;
720 /** Set whether or not this player should always burn text subtitles into the image,
721 * regardless of the content settings.
722 * @param burn true to always burn subtitles, false to obey content settings.
725 Player::set_always_burn_subtitles (bool burn)
727 _always_burn_subtitles = burn;
/* NOTE(review): the line below appears to belong to a separate setter whose
 * signature is missing from this excerpt — confirm against the full file. */
734 _have_valid_pieces = false;
/* Make this player render content that would otherwise be passed through by
 * reference (referenced DCP video/audio/subtitles); pieces must be rebuilt.
 */
738 Player::set_play_referenced ()
740 _play_referenced = true;
741 _have_valid_pieces = false;
/* @return ReferencedReelAssets for every DCP content item that is set to be
 * referenced (video/audio/subtitle), each with its DCPTimePeriod in the film.
 * NOTE(review): interior lines (the push_back calls and asset arguments) are
 * missing from this excerpt — confirm against the full file.
 */
744 list<ReferencedReelAsset>
745 Player::get_reel_assets ()
747 list<ReferencedReelAsset> a;
749 BOOST_FOREACH (shared_ptr<Content> i, _playlist->content ()) {
750 shared_ptr<DCPContent> j = dynamic_pointer_cast<DCPContent> (i);
/* scoped_ptr: the decoder is only needed while we walk this content's reels. */
755 scoped_ptr<DCPDecoder> decoder;
757 decoder.reset (new DCPDecoder (j, _film->log(), false));
763 BOOST_FOREACH (shared_ptr<dcp::Reel> k, decoder->reels()) {
/* `offset' accumulates reel lengths so each reel's period starts where the last ended. */
764 DCPTime const from = i->position() + DCPTime::from_frames (offset, _film->video_frame_rate());
765 if (j->reference_video ()) {
767 ReferencedReelAsset (
769 DCPTimePeriod (from, from + DCPTime::from_frames (k->main_picture()->duration(), _film->video_frame_rate()))
774 if (j->reference_audio ()) {
776 ReferencedReelAsset (
778 DCPTimePeriod (from, from + DCPTime::from_frames (k->main_sound()->duration(), _film->video_frame_rate()))
783 if (j->reference_subtitle ()) {
784 DCPOMATIC_ASSERT (k->main_subtitle ());
786 ReferencedReelAsset (
788 DCPTimePeriod (from, from + DCPTime::from_frames (k->main_subtitle()->duration(), _film->video_frame_rate()))
793 /* Assume that main picture duration is the length of the reel */
794 offset += k->main_picture()->duration ();
801 list<shared_ptr<Piece> >
802 Player::overlaps (DCPTime from, DCPTime to, boost::function<bool (Content *)> valid)
804 if (!_have_valid_pieces) {
808 list<shared_ptr<Piece> > overlaps;
809 BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
810 if (valid (i->content.get ()) && i->content->position() < to && i->content->end() > from) {
811 overlaps.push_back (i);