2 Copyright (C) 2013 Carl Hetherington <cth@carlh.net>
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; either version 2 of the License, or
7 (at your option) any later version.
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #include "ffmpeg_decoder.h"
24 #include "ffmpeg_content.h"
25 #include "still_image_decoder.h"
26 #include "still_image_content.h"
27 #include "moving_image_decoder.h"
28 #include "moving_image_content.h"
29 #include "sndfile_decoder.h"
30 #include "sndfile_content.h"
31 #include "subtitle_content.h"
36 #include "resampler.h"
47 using boost::shared_ptr;
48 using boost::weak_ptr;
49 using boost::dynamic_pointer_cast;
/* NOTE(review): this file is a fragmentary excerpt -- interior lines and most
   braces are missing, so comments annotate only the visible tokens.
   Piece pairs a piece of Content with the Decoder that produces its data,
   and tracks independent video/audio playback positions for it. */
54 Piece (shared_ptr<Content> c)
/* Construct from content alone; both positions start at the content's start. */
56 , video_position (c->position ())
57 , audio_position (c->position ())
/* Construct with an already-created decoder; positions initialised the same way. */
60 Piece (shared_ptr<Content> c, shared_ptr<Decoder> d)
63 , video_position (c->position ())
64 , audio_position (c->position ())
/* The wrapped content and the decoder feeding this piece. */
67 shared_ptr<Content> content;
68 shared_ptr<Decoder> decoder;
/* Construct a Player over a Film and a Playlist.  Connects to the playlist's
   Changed/ContentChanged signals and the film's Changed signal so cached piece
   state can be invalidated, then sizes the output container to fit the film's
   container ratio within its full frame.  (Excerpt: some initialiser lines are
   not visible here.) */
73 Player::Player (shared_ptr<const Film> f, shared_ptr<const Playlist> p)
78 , _have_valid_pieces (false)
/* The merger converts Time <-> audio frames via the Film's helpers. */
81 , _audio_merger (f->audio_channels(), bind (&Film::time_to_audio_frames, f.get(), _1), bind (&Film::audio_frames_to_time, f.get(), _1))
82 , _last_emit_was_black (false)
84 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
85 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3));
86 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
87 set_video_container_size (fit_ratio_within (_film->container()->ratio (), _film->full_frame ()));
/* Disable video decoding for this player (body not visible in this excerpt;
   presumably clears _video -- TODO confirm against full source). */
91 Player::disable_video ()
/* Disable audio decoding for this player (body not visible in this excerpt;
   presumably clears _audio -- TODO confirm against full source). */
97 Player::disable_audio ()
/* One pass of playback: rebuild pieces if stale, find the decoder with the
   earliest pending position, fill any gap before it (silence for audio; the
   video-gap action is not visible in this excerpt), then drive that decoder.
   Finally, pull merged audio that all decoders have passed and emit it. */
105 if (!_have_valid_pieces) {
107 _have_valid_pieces = true;
/* Scan all pieces for the smallest video/audio position among live decoders. */
110 Time earliest_t = TIME_MAX;
111 shared_ptr<Piece> earliest;
117 for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
/* Finished decoders cannot supply more data; skip them. */
118 if ((*i)->decoder->done ()) {
122 if (_video && dynamic_pointer_cast<VideoDecoder> ((*i)->decoder)) {
123 if ((*i)->video_position < earliest_t) {
124 earliest_t = (*i)->video_position;
130 if (_audio && dynamic_pointer_cast<AudioDecoder> ((*i)->decoder)) {
131 if ((*i)->audio_position < earliest_t) {
132 earliest_t = (*i)->audio_position;
/* Gap before the next video: the filling emit is on a line not shown here. */
146 if (earliest_t > _video_position) {
149 earliest->decoder->pass ();
/* Gap before the next audio: pad with silence, then drive the decoder. */
154 if (earliest_t > _audio_position) {
155 emit_silence (_film->time_to_audio_frames (earliest_t - _audio_position));
157 earliest->decoder->pass ();
/* If that pass finished an audio decoder, flush its resampler (if one exists)
   and feed the flushed tail back through process_audio. */
159 if (earliest->decoder->done()) {
160 shared_ptr<AudioContent> ac = dynamic_pointer_cast<AudioContent> (earliest->content);
162 shared_ptr<Resampler> re = resampler (ac, false);
164 shared_ptr<const AudioBuffers> b = re->flush ();
166 process_audio (earliest, b, ac->audio_length ());
/* Audio may only be emitted up to the minimum position reached by every
   audio decoder, since the merger can still receive earlier data. */
175 Time audio_done_up_to = TIME_MAX;
176 for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
177 if (dynamic_pointer_cast<AudioDecoder> ((*i)->decoder)) {
178 audio_done_up_to = min (audio_done_up_to, (*i)->audio_position);
182 TimedAudioBuffers<Time> tb = _audio_merger.pull (audio_done_up_to);
183 Audio (tb.audio, tb.time);
184 _audio_position += _film->audio_frames_to_time (tb.audio->frames ());
/* Receive one decoded video frame from a piece's decoder: record it for
   repeat_last_video(), apply frame-rate conversion / trimming, crop and scale
   into the output container, overlay any active subtitle, letterbox if needed,
   then emit via the Video signal and advance the video position. */
191 Player::process_video (weak_ptr<Piece> weak_piece, shared_ptr<const Image> image, Eyes eyes, bool same, VideoContent::Frame frame)
193 /* Keep a note of what came in so that we can repeat it if required */
194 _last_process_video.weak_piece = weak_piece;
195 _last_process_video.image = image;
196 _last_process_video.eyes = eyes;
197 _last_process_video.same = same;
198 _last_process_video.frame = frame;
/* The piece may have been destroyed since the signal was connected. */
200 shared_ptr<Piece> piece = weak_piece.lock ();
205 shared_ptr<VideoContent> content = dynamic_pointer_cast<VideoContent> (piece->content);
/* Frame-rate conversion: when skipping, drop every odd source frame. */
208 FrameRateConversion frc (content->video_frame_rate(), _film->video_frame_rate());
209 if (frc.skip && (frame % 2) == 1) {
/* Time of this frame relative to the start of the content. */
213 Time const relative_time = (frame * frc.factor() * TIME_HZ / _film->video_frame_rate());
/* Frames inside the trimmed region are discarded. */
214 if (content->trimmed (relative_time)) {
218 /* Convert to RGB first, as FFmpeg doesn't seem to like handling YUV images with odd widths */
219 shared_ptr<Image> work_image = image->scale (image->size (), _film->scaler(), PIX_FMT_RGB24, true);
221 work_image = work_image->crop (content->crop(), true);
/* Use the content's forced ratio if set, else the post-crop natural ratio. */
223 float const ratio = content->ratio() ? content->ratio()->ratio() : content->video_size_after_crop().ratio();
224 libdcp::Size image_size = fit_ratio_within (ratio, _video_container_size);
226 work_image = work_image->scale (image_size, _film->scaler(), PIX_FMT_RGB24, true);
/* Absolute DCP time of this frame. */
228 Time time = content->position() + relative_time - content->trim_start ();
/* Blend in the current subtitle if one is active at this time. */
230 if (_film->with_subtitles () && _out_subtitle.image && time >= _out_subtitle.from && time <= _out_subtitle.to) {
231 work_image->alpha_blend (_out_subtitle.image, _out_subtitle.position);
/* Letterbox/pillarbox: centre the scaled image inside the container. */
234 if (image_size != _video_container_size) {
235 assert (image_size.width <= _video_container_size.width);
236 assert (image_size.height <= _video_container_size.height);
237 shared_ptr<Image> im (new Image (PIX_FMT_RGB24, _video_container_size, true));
239 im->copy (work_image, Position<int> ((_video_container_size.width - image_size.width) / 2, (_video_container_size.height - image_size.height) / 2));
243 #ifdef DCPOMATIC_DEBUG
244 _last_video = piece->content;
/* Emit the frame, then step time forward by one DCP frame. */
247 Video (work_image, eyes, content->colour_conversion(), same, time);
248 time += TIME_HZ / _film->video_frame_rate();
250 _last_emit_was_black = false;
252 _video_position = piece->video_position = time;
/* Receive decoded audio from a piece's decoder: apply gain, resample to the
   output rate if needed, honour trims, remap channels to the DCP layout, trim
   anything before time zero, then push into the audio merger and advance the
   piece's audio position.  (Excerpt: several reassignment lines are missing.) */
256 Player::process_audio (weak_ptr<Piece> weak_piece, shared_ptr<const AudioBuffers> audio, AudioContent::Frame frame)
258 shared_ptr<Piece> piece = weak_piece.lock ();
263 shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> (piece->content);
/* Apply per-content gain on a copy so the decoder's buffer isn't mutated. */
267 if (content->audio_gain() != 0) {
268 shared_ptr<AudioBuffers> gain (new AudioBuffers (audio));
269 gain->apply_gain (content->audio_gain ());
/* Resample when the content's rate differs from the output rate; `frame`
   is presumably updated from ro.second on a line not visible here. */
274 if (content->content_audio_frame_rate() != content->output_audio_frame_rate()) {
275 shared_ptr<Resampler> r = resampler (content, true);
276 pair<shared_ptr<const AudioBuffers>, AudioContent::Frame> ro = r->run (audio, frame);
281 Time const relative_time = _film->audio_frames_to_time (frame);
283 if (content->trimmed (relative_time)) {
/* Absolute DCP time, including the per-content audio delay (ms -> Time). */
287 Time time = content->position() + (content->audio_delay() * TIME_HZ / 1000) + relative_time;
/* Remap content channels into a silent DCP-channel-count buffer. */
290 shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), audio->frames()));
291 dcp_mapped->make_silent ();
292 list<pair<int, libdcp::Channel> > map = content->audio_mapping().content_to_dcp ();
293 for (list<pair<int, libdcp::Channel> >::iterator i = map.begin(); i != map.end(); ++i) {
294 if (i->first < audio->channels() && i->second < dcp_mapped->channels()) {
295 dcp_mapped->accumulate_channel (audio.get(), i->first, i->second);
301 /* We must cut off anything that comes before the start of all time */
303 int const frames = - time * _film->audio_frame_rate() / TIME_HZ;
/* If everything is before time zero there is nothing to emit. */
304 if (frames >= audio->frames ()) {
308 shared_ptr<AudioBuffers> trimmed (new AudioBuffers (audio->channels(), audio->frames() - frames));
309 trimmed->copy_from (audio.get(), audio->frames() - frames, frames, 0);
/* Hand off to the merger and advance this piece's audio position. */
315 _audio_merger.push (audio, time);
316 piece->audio_position += _film->audio_frames_to_time (audio->frames ());
/* Flush fragment (function header not visible; presumably Player::flush() --
   TODO confirm): drain the audio merger, emit what remains, then pad with
   black video / silent audio until the two positions meet. */
322 TimedAudioBuffers<Time> tb = _audio_merger.flush ();
324 Audio (tb.audio, tb.time);
325 _audio_position += _film->audio_frames_to_time (tb.audio->frames ());
/* Bring video up to audio (the emitting call is not visible here). */
328 while (_video_position < _audio_position) {
/* Bring audio up to video with silence. */
332 while (_audio_position < _video_position) {
333 emit_silence (_film->time_to_audio_frames (_video_position - _audio_position));
338 /** Seek so that the next pass() will yield (approximately) the requested frame.
339 * Pass accurate = true to try harder to get close to the request.
340 * @return true on error
343 Player::seek (Time t, bool accurate)
/* Rebuild pieces first if cached state is stale. */
345 if (!_have_valid_pieces) {
347 _have_valid_pieces = true;
/* Nothing to seek in an empty playlist (the error return is on a hidden line). */
350 if (_pieces.empty ()) {
354 for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
/* Only video content is seeked here; see the XXX about audio below. */
355 shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> ((*i)->content);
/* Clamp the requested time into this content's playable range. */
360 Time s = t - vc->position ();
361 s = max (static_cast<Time> (0), s);
362 s = min (vc->length_after_trim(), s);
364 (*i)->video_position = (*i)->audio_position = vc->position() + s;
366 FrameRateConversion frc (vc->video_frame_rate(), _film->video_frame_rate());
367 /* Here we are converting from time (in the DCP) to a frame number in the content.
368 Hence we need to use the DCP's frame rate and the double/skip correction, not
371 VideoContent::Frame f = (s + vc->trim_start ()) * _film->video_frame_rate() / (frc.factor() * TIME_HZ);
372 dynamic_pointer_cast<VideoDecoder>((*i)->decoder)->seek (f, accurate);
/* Reset both output positions to the seek target. */
375 _video_position = _audio_position = t;
377 /* XXX: don't seek audio because we don't need to... */
/* Rebuild the list of Pieces from the playlist: sort content, create a
   decoder of the matching type for each item, wire its output signals to
   the process_* handlers, and reuse old StillImageDecoders where possible.
   (Excerpt: the if/else scaffolding between branches is not visible.) */
381 Player::setup_pieces ()
/* Keep the previous pieces so still-image decoders can be recycled below. */
383 list<shared_ptr<Piece> > old_pieces = _pieces;
387 ContentList content = _playlist->content ();
388 sort (content.begin(), content.end(), ContentSorter ());
390 for (ContentList::iterator i = content.begin(); i != content.end(); ++i) {
392 shared_ptr<Piece> piece (new Piece (*i));
394 /* XXX: into content? */
/* FFmpeg content: video + audio + subtitles. */
396 shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
398 shared_ptr<FFmpegDecoder> fd (new FFmpegDecoder (_film, fc, _video, _audio));
400 fd->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3, _4));
401 fd->Audio.connect (bind (&Player::process_audio, this, piece, _1, _2));
402 fd->Subtitle.connect (bind (&Player::process_subtitle, this, piece, _1, _2, _3, _4));
/* Still images: video only. */
407 shared_ptr<const StillImageContent> ic = dynamic_pointer_cast<const StillImageContent> (*i);
409 shared_ptr<StillImageDecoder> id;
411 /* See if we can re-use an old StillImageDecoder */
412 for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
413 shared_ptr<StillImageDecoder> imd = dynamic_pointer_cast<StillImageDecoder> ((*j)->decoder);
414 if (imd && imd->content() == ic) {
/* No reusable decoder found: make a fresh one. */
420 id.reset (new StillImageDecoder (_film, ic));
421 id->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3, _4));
/* Moving image sequences: video only. */
427 shared_ptr<const MovingImageContent> mc = dynamic_pointer_cast<const MovingImageContent> (*i);
429 shared_ptr<MovingImageDecoder> md;
432 md.reset (new MovingImageDecoder (_film, mc));
433 md->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3, _4));
/* Sound files: audio only. */
439 shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (*i);
441 shared_ptr<AudioDecoder> sd (new SndfileDecoder (_film, sc));
442 sd->Audio.connect (bind (&Player::process_audio, this, piece, _1, _2));
447 _pieces.push_back (piece);
/* React to a content property change.  Position/length/trim changes invalidate
   the piece list; subtitle and video-presentation changes take other branches
   whose bodies are not visible in this excerpt. */
452 Player::content_changed (weak_ptr<Content> w, int property, bool frequent)
454 shared_ptr<Content> c = w.lock ();
/* Timeline-shape properties: pieces must be rebuilt on the next pass/seek. */
460 property == ContentProperty::POSITION || property == ContentProperty::LENGTH ||
461 property == ContentProperty::TRIM_START || property == ContentProperty::TRIM_END
464 _have_valid_pieces = false;
467 } else if (property == SubtitleContentProperty::SUBTITLE_OFFSET || property == SubtitleContentProperty::SUBTITLE_SCALE) {
/* Video presentation properties (branch body not visible here). */
473 property == VideoContentProperty::VIDEO_FRAME_TYPE || property == VideoContentProperty::VIDEO_CROP ||
474 property == VideoContentProperty::VIDEO_RATIO
/* Playlist membership/order changed: invalidate the cached piece list so it
   is rebuilt on the next pass() or seek(). */
482 Player::playlist_changed ()
484 _have_valid_pieces = false;
/* Set the size of the output video container and rebuild the cached black
   frame (used by emit_black) at that size. */
489 Player::set_video_container_size (libdcp::Size s)
491 _video_container_size = s;
492 _black_frame.reset (new Image (PIX_FMT_RGB24, _video_container_size, true));
493 _black_frame->make_black ();
/* Look up (and, when `create` is true, lazily construct) the Resampler for a
   given AudioContent.  Returns an empty pointer when none exists and creation
   was not requested.  (Excerpt: the create-check and cache-insert lines are
   not visible.) */
496 shared_ptr<Resampler>
497 Player::resampler (shared_ptr<AudioContent> c, bool create)
499 map<shared_ptr<AudioContent>, shared_ptr<Resampler> >::iterator i = _resamplers.find (c);
500 if (i != _resamplers.end ()) {
505 return shared_ptr<Resampler> ();
/* New resampler converts from the content's native rate to the output rate. */
508 shared_ptr<Resampler> r (new Resampler (c->content_audio_frame_rate(), c->output_audio_frame_rate(), c->audio_channels()));
/* Emit one frame of black at the current video position and advance it by one
   film frame.  `same` is true when the previous emission was also black, so
   downstream consumers can skip re-encoding identical frames. */
514 Player::emit_black ()
516 #ifdef DCPOMATIC_DEBUG
517 _last_video.reset ();
520 Video (_black_frame, EYES_BOTH, ColourConversion(), _last_emit_was_black, _video_position);
521 _video_position += _film->video_frames_to_time (1);
522 _last_emit_was_black = true;
/* Emit up to `most` frames of silence at the current audio position.  Each
   emission is capped at half a second of frames; presumably a surrounding
   loop (not visible in this excerpt) repeats until `most` is consumed --
   TODO confirm against full source. */
526 Player::emit_silence (OutputAudioFrame most)
532 OutputAudioFrame N = min (most, _film->audio_frame_rate() / 2);
533 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), N));
534 silence->make_silent ();
535 Audio (silence, _audio_position);
536 _audio_position += _film->audio_frames_to_time (N);
/* React to a Film property change that would alter this player's output
   (scaler, subtitle flag or container); the notification action itself is on
   lines not visible in this excerpt. */
540 Player::film_changed (Film::Property p)
542 /* Here we should notice Film properties that affect our output, and
543 alert listeners that our output now would be different to how it was
544 last time we were run.
547 if (p == Film::SCALER || p == Film::WITH_SUBTITLES || p == Film::CONTAINER) {
/* Receive a decoded subtitle image with its on-screen rectangle and time
   span; stash it in _in_subtitle for update_subtitle() to scale/position. */
553 Player::process_subtitle (weak_ptr<Piece> weak_piece, shared_ptr<Image> image, dcpomatic::Rect<double> rect, Time from, Time to)
555 _in_subtitle.piece = weak_piece;
556 _in_subtitle.image = image;
557 _in_subtitle.rect = rect;
558 _in_subtitle.from = from;
559 _in_subtitle.to = to;
/* Convert the raw incoming subtitle (_in_subtitle) into the output subtitle
   (_out_subtitle): apply the content's offset and scale, compute the on-screen
   position within the video container, rescale the image, and shift the time
   span to absolute DCP time. */
565 Player::update_subtitle ()
567 shared_ptr<Piece> piece = _in_subtitle.piece.lock ();
/* No incoming image means the subtitle has been cleared. */
572 if (!_in_subtitle.image) {
573 _out_subtitle.image.reset ();
577 shared_ptr<SubtitleContent> sc = dynamic_pointer_cast<SubtitleContent> (piece->content);
580 dcpomatic::Rect<double> in_rect = _in_subtitle.rect;
581 libdcp::Size scaled_size;
/* Apply the user-configured vertical offset. */
583 in_rect.y += sc->subtitle_offset ();
585 /* We will scale the subtitle up to fit _video_container_size, and also by the additional subtitle_scale */
586 scaled_size.width = in_rect.width * _video_container_size.width * sc->subtitle_scale ();
587 scaled_size.height = in_rect.height * _video_container_size.height * sc->subtitle_scale ();
589 /* Then we need a corrective translation, consisting of two parts:
591 * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
592 * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
594 * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
595 * (width_before_subtitle_scale * (1 - subtitle_scale) / 2) and
596 * (height_before_subtitle_scale * (1 - subtitle_scale) / 2).
598 * Combining these two translations gives these expressions.
601 _out_subtitle.position.x = rint (_video_container_size.width * (in_rect.x + (in_rect.width * (1 - sc->subtitle_scale ()) / 2)));
602 _out_subtitle.position.y = rint (_video_container_size.height * (in_rect.y + (in_rect.height * (1 - sc->subtitle_scale ()) / 2)));
/* Rescale the image; bicubic is used regardless of the film's scaler. */
604 _out_subtitle.image = _in_subtitle.image->scale (
606 Scaler::from_id ("bicubic"),
607 _in_subtitle.image->pixel_format (),
/* Shift the subtitle's time span from content-relative to DCP time. */
610 _out_subtitle.from = _in_subtitle.from + piece->content->position ();
611 _out_subtitle.to = _in_subtitle.to + piece->content->position ();
614 /** Re-emit the last frame that was emitted, using current settings for crop, ratio, scaler and subtitles.
615 * @return false if this could not be done.
618 Player::repeat_last_video ()
620 if (!_last_process_video.image) {
625 _last_process_video.weak_piece,
626 _last_process_video.image,
627 _last_process_video.eyes,
628 _last_process_video.same,
629 _last_process_video.frame