/*
    Copyright (C) 2013 Carl Hetherington <cth@carlh.net>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
23 #include "ffmpeg_decoder.h"
24 #include "ffmpeg_content.h"
25 #include "still_image_decoder.h"
26 #include "still_image_content.h"
27 #include "moving_image_decoder.h"
28 #include "moving_image_content.h"
29 #include "sndfile_decoder.h"
30 #include "sndfile_content.h"
31 #include "subtitle_content.h"
36 #include "resampler.h"
47 using boost::shared_ptr;
48 using boost::weak_ptr;
49 using boost::dynamic_pointer_cast;
/* Piece: binds one Content item to its Decoder, together with the timeline
   positions up to which video and audio have been emitted for it.
   NOTE(review): this chunk is a sampled extraction — the class header, braces
   and parts of the constructor initializer lists are missing, and each line
   carries a stray original-file line number from the extraction. */
54 Piece (shared_ptr<Content> c)
/* Constructor for a piece whose decoder is attached later; both stream
   positions start at the content's position on the timeline. */
56 , video_position (c->position ())
57 , audio_position (c->position ())
/* Constructor for a piece created together with its decoder. */
60 Piece (shared_ptr<Content> c, shared_ptr<Decoder> d)
63 , video_position (c->position ())
64 , audio_position (c->position ())
/* The content this piece plays, and the decoder that produces its data. */
67 shared_ptr<Content> content;
68 shared_ptr<Decoder> decoder;
/* Construct a Player for a given Film and Playlist.
   Wires itself to change notifications from the playlist and film, and sets
   the initial video container size from the film's container ratio.
   NOTE(review): sampled extraction — several initializer-list entries and the
   braces are missing from this view. */
73 Player::Player (shared_ptr<const Film> f, shared_ptr<const Playlist> p)
78 , _have_valid_pieces (false)
/* The audio merger converts between Time and audio frame counts using the
   film's conversion helpers. */
81 , _audio_merger (f->audio_channels(), bind (&Film::time_to_audio_frames, f.get(), _1), bind (&Film::audio_frames_to_time, f.get(), _1))
82 , _last_emit_was_black (false)
/* Re-build pieces / re-examine content / note film changes when things change */
84 _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
85 _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3));
86 _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
/* Fit the container ratio inside the film's full frame to size our output */
87 set_video_container_size (fit_ratio_within (_film->container()->ratio (), _film->full_frame ()));
/* Turn off video decoding/emission for this player.
   NOTE(review): only the signatures survive in this extraction; the bodies
   (presumably clearing _video / _audio flags — confirm against the full file)
   are not visible. */
91 Player::disable_video ()
/* Turn off audio decoding/emission for this player. */
97 Player::disable_audio ()
/* Body of Player::pass() (the signature was dropped by the extraction).
   One `pass' of playback: find the piece whose next output is earliest on the
   timeline, fill any gap before it with black/silence, ask its decoder for
   more data, then pull merged audio that is complete and emit it.
   NOTE(review): many interior lines (braces, else-branches, emit_black calls)
   are missing from this view — hedge accordingly. */
105 if (!_have_valid_pieces) {
107 _have_valid_pieces = true;
/* Find the piece with the earliest video or audio position */
110 Time earliest_t = TIME_MAX;
111 shared_ptr<Piece> earliest;
117 for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
/* Finished decoders cannot supply anything further */
118 if ((*i)->decoder->done ()) {
122 if (_video && dynamic_pointer_cast<VideoDecoder> ((*i)->decoder)) {
123 if ((*i)->video_position < earliest_t) {
124 earliest_t = (*i)->video_position;
130 if (_audio && dynamic_pointer_cast<AudioDecoder> ((*i)->decoder)) {
131 if ((*i)->audio_position < earliest_t) {
132 earliest_t = (*i)->audio_position;
/* If the earliest data is ahead of our current video position there is a gap
   to fill before passing the decoder. */
146 if (earliest_t > _video_position) {
149 earliest->decoder->pass ();
/* Likewise for audio: fill the gap with silence first */
154 if (earliest_t > _audio_position) {
155 emit_silence (_film->time_to_audio_frames (earliest_t - _audio_position));
157 earliest->decoder->pass ();
/* When an audio decoder finishes, flush its resampler (if it has one) so no
   buffered samples are lost. */
159 if (earliest->decoder->done()) {
160 shared_ptr<AudioContent> ac = dynamic_pointer_cast<AudioContent> (earliest->content);
/* create = false: only fetch an existing resampler, never make a new one */
162 shared_ptr<Resampler> re = resampler (ac, false);
164 shared_ptr<const AudioBuffers> b = re->flush ();
166 process_audio (earliest, b, ac->audio_length ());
/* Emit merged audio up to the minimum position that every audio piece has
   reached — beyond that the mix could still change. */
175 Time audio_done_up_to = TIME_MAX;
176 for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
177 if (dynamic_pointer_cast<AudioDecoder> ((*i)->decoder)) {
178 audio_done_up_to = min (audio_done_up_to, (*i)->audio_position);
182 TimedAudioBuffers<Time> tb = _audio_merger.pull (audio_done_up_to);
183 Audio (tb.audio, tb.time);
184 _audio_position += _film->audio_frames_to_time (tb.audio->frames ());
/* Handle a decoded video frame from a piece: skip/trim as required, crop and
   scale it into the container, blend any active subtitle, letterbox/pillarbox
   it, then emit it via the Video signal and advance the video position.
   NOTE(review): sampled extraction — braces, early returns and some lines
   (e.g. the frc.repeat handling around the second Video emission) are missing
   from this view. */
191 Player::process_video (weak_ptr<Piece> weak_piece, shared_ptr<const Image> image, Eyes eyes, bool same, VideoContent::Frame frame)
193 /* Keep a note of what came in so that we can repeat it if required */
194 _last_process_video.weak_piece = weak_piece;
195 _last_process_video.image = image;
196 _last_process_video.eyes = eyes;
197 _last_process_video.same = same;
198 _last_process_video.frame = frame;
/* The piece may have been destroyed since the signal was connected */
200 shared_ptr<Piece> piece = weak_piece.lock ();
205 shared_ptr<VideoContent> content = dynamic_pointer_cast<VideoContent> (piece->content);
208 FrameRateConversion frc (content->video_frame_rate(), _film->video_frame_rate());
/* When skipping, drop every other source frame */
209 if (frc.skip && (frame % 2) == 1) {
/* Time of this frame relative to the start of the content */
213 Time const relative_time = (frame * frc.factor() * TIME_HZ / _film->video_frame_rate());
214 if (content->trimmed (relative_time)) {
218 /* Convert to RGB first, as FFmpeg doesn't seem to like handling YUV images with odd widths */
219 shared_ptr<Image> work_image = image->scale (image->size (), _film->scaler(), PIX_FMT_RGB24, true);
221 work_image = work_image->crop (content->crop(), true);
/* Use the content's forced ratio if set, otherwise its natural post-crop ratio */
223 float const ratio = content->ratio() ? content->ratio()->ratio() : content->video_size_after_crop().ratio();
224 libdcp::Size image_size = fit_ratio_within (ratio, _video_container_size);
226 work_image = work_image->scale (image_size, _film->scaler(), PIX_FMT_RGB24, true);
/* Absolute timeline time of this frame */
228 Time time = content->position() + relative_time - content->trim_start ();
/* Blend the current subtitle if it overlaps this frame in time */
230 if (_film->with_subtitles () && _out_subtitle.image && time >= _out_subtitle.from && time <= _out_subtitle.to) {
231 work_image->alpha_blend (_out_subtitle.image, _out_subtitle.position);
/* Centre the scaled image inside the (possibly larger) container */
234 if (image_size != _video_container_size) {
235 assert (image_size.width <= _video_container_size.width);
236 assert (image_size.height <= _video_container_size.height);
237 shared_ptr<Image> im (new Image (PIX_FMT_RGB24, _video_container_size, true));
239 im->copy (work_image, Position<int> ((_video_container_size.width - image_size.width) / 2, (_video_container_size.height - image_size.height) / 2));
243 #ifdef DCPOMATIC_DEBUG
244 _last_video = piece->content;
/* Emit the frame; the second emission (presumably when frc.repeat — confirm
   against the full file) re-sends the same image flagged `same'. */
247 Video (work_image, eyes, content->colour_conversion(), same, time);
248 time += TIME_HZ / _film->video_frame_rate();
251 Video (work_image, eyes, content->colour_conversion(), true, time);
252 time += TIME_HZ / _film->video_frame_rate();
255 _last_emit_was_black = false;
/* Advance both the player's and the piece's video position */
257 _video_position = piece->video_position = time;
/* Handle decoded audio from a piece: apply gain, resample if the content and
   output rates differ, honour trim, remap channels into the DCP layout, drop
   anything before time zero, then push the result into the audio merger.
   NOTE(review): sampled extraction — braces, returns and the reassignments of
   `audio' after gain/resample/map/trim are missing from this view. */
261 Player::process_audio (weak_ptr<Piece> weak_piece, shared_ptr<const AudioBuffers> audio, AudioContent::Frame frame)
263 shared_ptr<Piece> piece = weak_piece.lock ();
268 shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> (piece->content);
/* Gain: copy the (const) buffers and apply the content's gain to the copy */
272 if (content->audio_gain() != 0) {
273 shared_ptr<AudioBuffers> gain (new AudioBuffers (audio));
274 gain->apply_gain (content->audio_gain ());
/* Resample when the source rate differs from the output rate; the resampler
   also corrects the frame number for the new rate. */
279 if (content->content_audio_frame_rate() != content->output_audio_frame_rate()) {
280 shared_ptr<Resampler> r = resampler (content, true);
281 pair<shared_ptr<const AudioBuffers>, AudioContent::Frame> ro = r->run (audio, frame);
/* Position of this block relative to the start of the content */
286 Time const relative_time = _film->audio_frames_to_time (frame);
288 if (content->trimmed (relative_time)) {
/* Absolute timeline time, including the content's configured audio delay */
292 Time time = content->position() + (content->audio_delay() * TIME_HZ / 1000) + relative_time;
/* Remap the content's channels into the film's DCP channel layout */
295 shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), audio->frames()));
296 dcp_mapped->make_silent ();
297 list<pair<int, libdcp::Channel> > map = content->audio_mapping().content_to_dcp ();
298 for (list<pair<int, libdcp::Channel> >::iterator i = map.begin(); i != map.end(); ++i) {
299 if (i->first < audio->channels() && i->second < dcp_mapped->channels()) {
300 dcp_mapped->accumulate_channel (audio.get(), i->first, i->second);
306 /* We must cut off anything that comes before the start of all time */
/* Number of frames of this block that fall before time zero (time < 0 here) */
308 int const frames = - time * _film->audio_frame_rate() / TIME_HZ;
/* Entirely before the start: nothing to emit */
309 if (frames >= audio->frames ()) {
313 shared_ptr<AudioBuffers> trimmed (new AudioBuffers (audio->channels(), audio->frames() - frames));
314 trimmed->copy_from (audio.get(), audio->frames() - frames, frames, 0);
/* Hand the block to the merger and advance this piece's audio position */
320 _audio_merger.push (audio, time);
321 piece->audio_position += _film->audio_frames_to_time (audio->frames ());
/* Body of what appears to be Player::flush() (the signature was dropped by
   the extraction): emit everything left in the audio merger, then pad with
   black frames / silence until the video and audio positions meet.
   NOTE(review): loop bodies (the emit_black() call for the first while) are
   missing from this view. */
327 TimedAudioBuffers<Time> tb = _audio_merger.flush ();
329 Audio (tb.audio, tb.time);
330 _audio_position += _film->audio_frames_to_time (tb.audio->frames ());
/* Pad video with black until it catches up with audio */
333 while (_video_position < _audio_position) {
/* Pad audio with silence until it catches up with video */
337 while (_audio_position < _video_position) {
338 emit_silence (_film->time_to_audio_frames (_video_position - _audio_position));
343 /** Seek so that the next pass() will yield (approximately) the requested frame.
344 * Pass accurate = true to try harder to get close to the request.
345 * @return true on error
/* NOTE(review): sampled extraction — braces, early returns and the handling
   of pieces without a VideoContent are missing from this view. */
348 Player::seek (Time t, bool accurate)
350 if (!_have_valid_pieces) {
352 _have_valid_pieces = true;
/* Nothing to seek in */
355 if (_pieces.empty ()) {
359 for (list<shared_ptr<Piece> >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) {
360 shared_ptr<VideoContent> vc = dynamic_pointer_cast<VideoContent> ((*i)->content);
/* Clamp the seek point into [0, length_after_trim] relative to this content */
365 Time s = t - vc->position ();
366 s = max (static_cast<Time> (0), s);
367 s = min (vc->length_after_trim(), s);
369 (*i)->video_position = (*i)->audio_position = vc->position() + s;
371 FrameRateConversion frc (vc->video_frame_rate(), _film->video_frame_rate());
372 /* Here we are converting from time (in the DCP) to a frame number in the content.
373 Hence we need to use the DCP's frame rate and the double/skip correction, not
/* Content frame number corresponding to the (trim-adjusted) seek time */
376 VideoContent::Frame f = (s + vc->trim_start ()) * _film->video_frame_rate() / (frc.factor() * TIME_HZ);
377 dynamic_pointer_cast<VideoDecoder>((*i)->decoder)->seek (f, accurate);
380 _video_position = _audio_position = t;
382 /* XXX: don't seek audio because we don't need to... */
/* Rebuild the list of pieces from the playlist: for each content item create
   the appropriate decoder (FFmpeg / still image / moving image / sndfile),
   connect its output signals to the process_* handlers, and store the piece.
   NOTE(review): sampled extraction — braces, the `continue' paths, the lines
   attaching each decoder to its piece, and parts of the re-use logic are
   missing from this view. */
386 Player::setup_pieces ()
/* Keep the old pieces around so some decoders can be re-used below */
388 list<shared_ptr<Piece> > old_pieces = _pieces;
/* Process content in timeline order */
392 ContentList content = _playlist->content ();
393 sort (content.begin(), content.end(), ContentSorter ());
395 for (ContentList::iterator i = content.begin(); i != content.end(); ++i) {
397 shared_ptr<Piece> piece (new Piece (*i));
399 /* XXX: into content? */
/* FFmpeg content: full audio/video/subtitle decoder */
401 shared_ptr<const FFmpegContent> fc = dynamic_pointer_cast<const FFmpegContent> (*i);
403 shared_ptr<FFmpegDecoder> fd (new FFmpegDecoder (_film, fc, _video, _audio));
405 fd->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3, _4));
406 fd->Audio.connect (bind (&Player::process_audio, this, piece, _1, _2));
407 fd->Subtitle.connect (bind (&Player::process_subtitle, this, piece, _1, _2, _3, _4));
/* Still image content */
412 shared_ptr<const StillImageContent> ic = dynamic_pointer_cast<const StillImageContent> (*i);
414 shared_ptr<StillImageDecoder> id;
416 /* See if we can re-use an old StillImageDecoder */
417 for (list<shared_ptr<Piece> >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) {
418 shared_ptr<StillImageDecoder> imd = dynamic_pointer_cast<StillImageDecoder> ((*j)->decoder);
419 if (imd && imd->content() == ic) {
/* No re-usable decoder found: make a new one */
425 id.reset (new StillImageDecoder (_film, ic));
426 id->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3, _4));
/* Moving image (image sequence) content */
432 shared_ptr<const MovingImageContent> mc = dynamic_pointer_cast<const MovingImageContent> (*i);
434 shared_ptr<MovingImageDecoder> md;
437 md.reset (new MovingImageDecoder (_film, mc));
438 md->Video.connect (bind (&Player::process_video, this, piece, _1, _2, _3, _4));
/* Sound file content: audio only */
444 shared_ptr<const SndfileContent> sc = dynamic_pointer_cast<const SndfileContent> (*i);
446 shared_ptr<AudioDecoder> sd (new SndfileDecoder (_film, sc));
447 sd->Audio.connect (bind (&Player::process_audio, this, piece, _1, _2));
452 _pieces.push_back (piece);
/* React to a property change on a piece of content.  Timing changes
   invalidate the piece list; subtitle changes re-run subtitle layout; video
   geometry changes presumably notify listeners (the branch bodies after the
   conditions are missing from this extraction — confirm against the full
   file). */
457 Player::content_changed (weak_ptr<Content> w, int property, bool frequent)
459 shared_ptr<Content> c = w.lock ();
/* Changes to the content's place on the timeline mean the pieces must be
   rebuilt on the next pass()/seek(). */
465 property == ContentProperty::POSITION || property == ContentProperty::LENGTH ||
466 property == ContentProperty::TRIM_START || property == ContentProperty::TRIM_END
469 _have_valid_pieces = false;
472 } else if (property == SubtitleContentProperty::SUBTITLE_OFFSET || property == SubtitleContentProperty::SUBTITLE_SCALE) {
/* Changes that alter how video is presented */
478 property == VideoContentProperty::VIDEO_FRAME_TYPE || property == VideoContentProperty::VIDEO_CROP ||
479 property == VideoContentProperty::VIDEO_RATIO
/* The playlist's overall structure changed: invalidate the piece list so it
   is rebuilt before the next pass()/seek().  NOTE(review): the rest of the
   body (presumably an emission of Changed — confirm against the full file)
   is missing from this extraction. */
487 Player::playlist_changed ()
489 _have_valid_pieces = false;
494 Player::set_video_container_size (libdcp::Size s)
496 _video_container_size = s;
497 _black_frame.reset (new Image (PIX_FMT_RGB24, _video_container_size, true));
498 _black_frame->make_black ();
/* Fetch (or, if `create' is true, lazily create) the Resampler for a given
   piece of audio content.  Returns a null pointer when no resampler exists
   and creation was not requested.  NOTE(review): the return statements inside
   the branches and the insertion of the new resampler into _resamplers are
   missing from this extraction — confirm against the full file. */
501 shared_ptr<Resampler>
502 Player::resampler (shared_ptr<AudioContent> c, bool create)
/* Look for an existing resampler for this content */
504 map<shared_ptr<AudioContent>, shared_ptr<Resampler> >::iterator i = _resamplers.find (c);
505 if (i != _resamplers.end ()) {
/* Not found and not allowed to create: hand back a null pointer */
510 return shared_ptr<Resampler> ();
/* Make a new resampler converting the content's native rate to its output rate */
513 shared_ptr<Resampler> r (new Resampler (c->content_audio_frame_rate(), c->output_audio_frame_rate(), c->audio_channels()));
519 Player::emit_black ()
521 #ifdef DCPOMATIC_DEBUG
522 _last_video.reset ();
525 Video (_black_frame, EYES_BOTH, ColourConversion(), _last_emit_was_black, _video_position);
526 _video_position += _film->video_frames_to_time (1);
527 _last_emit_was_black = true;
531 Player::emit_silence (OutputAudioFrame most)
537 OutputAudioFrame N = min (most, _film->audio_frame_rate() / 2);
538 shared_ptr<AudioBuffers> silence (new AudioBuffers (_film->audio_channels(), N));
539 silence->make_silent ();
540 Audio (silence, _audio_position);
541 _audio_position += _film->audio_frames_to_time (N);
/* React to a change in a Film property.  NOTE(review): the body of the `if'
   (presumably an emission of Changed — confirm against the full file) is
   missing from this extraction. */
545 Player::film_changed (Film::Property p)
547 /* Here we should notice Film properties that affect our output, and
548 alert listeners that our output now would be different to how it was
549 last time we were run.
/* These three properties all change what the emitted video looks like */
552 if (p == Film::SCALER || p == Film::WITH_SUBTITLES || p == Film::CONTAINER) {
/* Receive a decoded subtitle image from a piece and stash it (with its
   placement rectangle and display interval) for later layout.
   NOTE(review): the tail of the function (presumably a call to
   update_subtitle() — confirm against the full file) is missing from this
   extraction.  `from'/`to' are times relative to the start of the content. */
558 Player::process_subtitle (weak_ptr<Piece> weak_piece, shared_ptr<Image> image, dcpomatic::Rect<double> rect, Time from, Time to)
560 _in_subtitle.piece = weak_piece;
561 _in_subtitle.image = image;
562 _in_subtitle.rect = rect;
563 _in_subtitle.from = from;
564 _in_subtitle.to = to;
/* Convert the stored incoming subtitle (_in_subtitle) into the outgoing form
   (_out_subtitle): apply the content's offset and scale, compute the pixel
   position within the video container, rescale the image, and convert the
   display times to absolute timeline times.
   NOTE(review): sampled extraction — braces, early returns and some scale()
   arguments are missing from this view. */
570 Player::update_subtitle ()
572 shared_ptr<Piece> piece = _in_subtitle.piece.lock ();
/* No incoming subtitle: clear the outgoing one */
577 if (!_in_subtitle.image) {
578 _out_subtitle.image.reset ();
582 shared_ptr<SubtitleContent> sc = dynamic_pointer_cast<SubtitleContent> (piece->content);
/* in_rect is expressed as proportions of the video frame */
585 dcpomatic::Rect<double> in_rect = _in_subtitle.rect;
586 libdcp::Size scaled_size;
/* Apply the content's configured vertical offset */
588 in_rect.y += sc->subtitle_offset ();
590 /* We will scale the subtitle up to fit _video_container_size, and also by the additional subtitle_scale */
591 scaled_size.width = in_rect.width * _video_container_size.width * sc->subtitle_scale ();
592 scaled_size.height = in_rect.height * _video_container_size.height * sc->subtitle_scale ();
594 /* Then we need a corrective translation, consisting of two parts:
596 * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
597 * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
599 * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
600 * (width_before_subtitle_scale * (1 - subtitle_scale) / 2) and
601 * (height_before_subtitle_scale * (1 - subtitle_scale) / 2).
603 * Combining these two translations gives these expressions.
606 _out_subtitle.position.x = rint (_video_container_size.width * (in_rect.x + (in_rect.width * (1 - sc->subtitle_scale ()) / 2)));
607 _out_subtitle.position.y = rint (_video_container_size.height * (in_rect.y + (in_rect.height * (1 - sc->subtitle_scale ()) / 2)));
/* Rescale the subtitle image (bicubic, keeping its pixel format) */
609 _out_subtitle.image = _in_subtitle.image->scale (
611 Scaler::from_id ("bicubic"),
612 _in_subtitle.image->pixel_format (),
/* Display times become absolute by adding the content's timeline position */
615 _out_subtitle.from = _in_subtitle.from + piece->content->position ();
616 _out_subtitle.to = _in_subtitle.to + piece->content->position ();
619 /** Re-emit the last frame that was emitted, using current settings for crop, ratio, scaler and subtitles.
620 * @return false if this could not be done.
/* NOTE(review): this function runs past the end of the visible chunk — the
   call that these arguments belong to (presumably process_video) and the
   return statements are not visible here. */
623 Player::repeat_last_video ()
/* Nothing has been emitted yet, so there is nothing to repeat */
625 if (!_last_process_video.image) {
/* Replay the stored inputs of the last process_video() call */
630 _last_process_video.weak_piece,
631 _last_process_video.image,
632 _last_process_video.eyes,
633 _last_process_video.same,
634 _last_process_video.frame