X-Git-Url: https://main.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fdecoder.cc;h=3f4cda6eb5a4345595410fe475314f3d24881bd7;hb=4861afbaab9c3710152d80f15c7a239c26deb74b;hp=673d571c40078d1b7adc2e785f7d5f525bc29968;hpb=83c296968eb998ae5bdb210f37305fc49f308c4f;p=dcpomatic.git

diff --git a/src/lib/decoder.cc b/src/lib/decoder.cc
index 673d571c4..3f4cda6eb 100644
--- a/src/lib/decoder.cc
+++ b/src/lib/decoder.cc
@@ -21,334 +21,18 @@
  * @brief Parent class for decoders of content.
  */
 
-#include 
-#include 
-#include 
 #include "film.h"
-#include "format.h"
-#include "job.h"
-#include "options.h"
-#include "exceptions.h"
-#include "image.h"
-#include "util.h"
-#include "log.h"
 #include "decoder.h"
-#include "delay_line.h"
-#include "subtitle.h"
-#include "filter_graph.h"
-
-using std::string;
-using std::stringstream;
-using std::min;
-using std::list;
+#include "i18n.h"
+
 using boost::shared_ptr;
 
 /** @param f Film.
- * @param o Options.
- * @param j Job that we are running within, or 0
- * @param minimal true to do the bare minimum of work; just run through the content.  Useful for acquiring
- * accurate frame counts as quickly as possible.  This generates no video or audio output.
+ * @param o Decode options.
  */
-Decoder::Decoder (boost::shared_ptr<Film> f, boost::shared_ptr<Options> o, Job* j, bool minimal)
+Decoder::Decoder (shared_ptr<Film> f)
 	: _film (f)
-	, _opt (o)
-	, _job (j)
-	, _minimal (minimal)
-	, _video_frames_in (0)
-	, _video_frames_out (0)
-	, _audio_frames_in (0)
-	, _audio_frames_out (0)
-	, _delay_line (0)
-	, _delay_in_bytes (0)
-{
-
-}
-
-Decoder::~Decoder ()
-{
-	delete _delay_line;
-}
-
-/** Start off a decode processing run.  This should only be called once on
- * a given Decoder object.
- */
-void
-Decoder::process_begin ()
-{
-	_delay_in_bytes = _film->audio_delay() * audio_sample_rate() * audio_channels() * bytes_per_audio_sample() / 1000;
-	_delay_line = new DelayLine (_delay_in_bytes);
-}
-
-/** Finish off a decode processing run */
-void
-Decoder::process_end ()
-{
-	if (_delay_in_bytes < 0) {
-		/* Empty the delay line */
-		uint8_t remainder[-_delay_in_bytes];
-		_delay_line->get_remaining (remainder);
-		emit_audio (remainder, -_delay_in_bytes);
-	}
-
-	if (_opt->decode_audio) {
-
-		/* Ensure that our video and audio emissions are the same length */
-
-		int64_t video_frames_out_in_audio_frames = ((int64_t) _video_frames_out * audio_sample_rate() / frames_per_second());
-		int64_t audio_short_by_frames = video_frames_out_in_audio_frames - _audio_frames_out;
-
-		_film->log()->log (
-			String::compose ("Decoder has emitted %1 video frames (which equals %2 audio frames) and %3 audio frames",
-					 _video_frames_out,
-					 video_frames_out_in_audio_frames,
-					 _audio_frames_out)
-			);
-
-		if (audio_short_by_frames < 0) {
-
-			_film->log()->log (String::compose ("Emitted %1 too many audio frames", -audio_short_by_frames));
-
-			/* We have emitted more audio than video.
-			   Emit enough black video frames so that we reverse this */
-			int const black_video_frames = ceil (-audio_short_by_frames * frames_per_second() / audio_sample_rate());
-
-			_film->log()->log (String::compose ("Emitting %1 frames of black video", black_video_frames));
-
-			shared_ptr<Image> black (new CompactImage (pixel_format(), native_size()));
-			black->make_black ();
-			for (int i = 0; i < black_video_frames; ++i) {
-				emit_video (black, shared_ptr<Subtitle> ());
-			}
-
-			/* Now recompute our check values */
-			video_frames_out_in_audio_frames = ((int64_t) _video_frames_out * audio_sample_rate() / frames_per_second());
-			audio_short_by_frames = video_frames_out_in_audio_frames - _audio_frames_out;
-		}
-
-		if (audio_short_by_frames > 0) {
-
-			_film->log()->log (String::compose ("Emitted %1 too few audio frames", audio_short_by_frames));
-
-			/* XXX: this is slightly questionable; does memset () give silence with all
-			   sample formats?
-			*/
-
-			int64_t bytes = audio_short_by_frames * _film->audio_channels() * bytes_per_audio_sample();
-
-			int64_t const silence_size = 16 * 1024 * _film->audio_channels() * bytes_per_audio_sample();
-			uint8_t silence[silence_size];
-			memset (silence, 0, silence_size);
-
-			while (bytes) {
-				int64_t const t = min (bytes, silence_size);
-				emit_audio (silence, t);
-				bytes -= t;
-			}
-		}
-	}
-}
-
-/** Start decoding */
-void
-Decoder::go ()
-{
-	process_begin ();
-
-	if (_job && !_film->dcp_length()) {
-		_job->set_progress_unknown ();
-	}
-
-	while (pass () == false) {
-		if (_job && _film->dcp_length()) {
-			_job->set_progress (float (_video_frames_out) / _film->dcp_length().get());
-		}
-	}
-
-	process_end ();
-}
-
-/** Called by subclasses to tell the world that some audio data is ready
- * @param data Audio data, in Film::audio_sample_format.
- * @param size Number of bytes of data.
- */
-void
-Decoder::process_audio (uint8_t* data, int size)
-{
-	/* Push into the delay line */
-	size = _delay_line->feed (data, size);
-
-	emit_audio (data, size);
-}
-
-void
-Decoder::emit_audio (uint8_t* data, int size)
-{
-	if (size == 0) {
-		return;
-	}
-
-	assert (_film->audio_channels());
-	assert (bytes_per_audio_sample());
-
-	/* Deinterleave and convert to float */
-
-	assert ((size % (bytes_per_audio_sample() * audio_channels())) == 0);
-
-	int const total_samples = size / bytes_per_audio_sample();
-	int const frames = total_samples / _film->audio_channels();
-	shared_ptr<AudioBuffers> audio (new AudioBuffers (audio_channels(), frames));
-
-	switch (audio_sample_format()) {
-	case AV_SAMPLE_FMT_S16:
-	{
-		int16_t* p = (int16_t *) data;
-		int sample = 0;
-		int channel = 0;
-		for (int i = 0; i < total_samples; ++i) {
-			audio->data(channel)[sample] = float(*p++) / (1 << 15);
-
-			++channel;
-			if (channel == _film->audio_channels()) {
-				channel = 0;
-				++sample;
-			}
-		}
-	}
-	break;
-
-	case AV_SAMPLE_FMT_S32:
-	{
-		int32_t* p = (int32_t *) data;
-		int sample = 0;
-		int channel = 0;
-		for (int i = 0; i < total_samples; ++i) {
-			audio->data(channel)[sample] = float(*p++) / (1 << 31);
-
-			++channel;
-			if (channel == _film->audio_channels()) {
-				channel = 0;
-				++sample;
-			}
-		}
-	}
-
-	case AV_SAMPLE_FMT_FLTP:
-	{
-		float* p = reinterpret_cast<float*> (data);
-		for (int i = 0; i < _film->audio_channels(); ++i) {
-			memcpy (audio->data(i), p, frames * sizeof(float));
-			p += frames;
-		}
-	}
-	break;
-
-	default:
-		assert (false);
-	}
-
-	/* Maybe apply gain */
-	if (_film->audio_gain() != 0) {
-		float const linear_gain = pow (10, _film->audio_gain() / 20);
-		for (int i = 0; i < _film->audio_channels(); ++i) {
-			for (int j = 0; j < frames; ++j) {
-				audio->data(i)[j] *= linear_gain;
-			}
-		}
-	}
-
-	/* Update the number of audio frames we've pushed to the encoder */
-	_audio_frames_out += audio->frames ();
-
-	Audio (audio);
-}
-
-/** Called by subclasses to tell the world that some video data is ready.
- * We do some post-processing / filtering then emit it for listeners.
- * @param frame to decode; caller manages memory.
- */
-void
-Decoder::process_video (AVFrame* frame) {
-	assert (_film->length());
-
-	if (_minimal) {
-		++_video_frames_in;
-		return;
-	}
-
-	/* Use Film::length here as our one may be wrong */
-
-	if (_opt->decode_video_skip != 0 && (_video_frames_in % _opt->decode_video_skip) != 0) {
-		++_video_frames_in;
-		return;
-	}
-
-	if (_film->dcp_trim_start() > _video_frames_in || (_film->length().get() + _film->dcp_trim_start()) < _video_frames_in) {
-		++_video_frames_in;
-		return;
-	}
-
-	shared_ptr<FilterGraph> graph;
-	list<shared_ptr<FilterGraph> >::iterator i = _filter_graphs.begin();
-	while (i != _filter_graphs.end() && !(*i)->can_process (Size (frame->width, frame->height), (AVPixelFormat) frame->format)) {
-		++i;
-	}
-
-	if (i == _filter_graphs.end ()) {
-		graph.reset (new FilterGraph (_film, this, _opt->apply_crop, Size (frame->width, frame->height), (AVPixelFormat) frame->format));
-		_filter_graphs.push_back (graph);
-		_film->log()->log (String::compose ("New graph for %1x%2, pixel format %3", frame->width, frame->height, frame->format));
-	} else {
-		graph = *i;
-	}
-
-	list<shared_ptr<Image> > images = graph->process (frame);
-
-	for (list<shared_ptr<Image> >::iterator i = images.begin(); i != images.end(); ++i) {
-		shared_ptr<Subtitle> sub;
-		if (_timed_subtitle && _timed_subtitle->displayed_at (double (video_frames_in()) / _film->frames_per_second())) {
-			sub = _timed_subtitle->subtitle ();
-		}
-
-		emit_video (*i, sub);
-	}
-}
-
-void
-Decoder::repeat_last_video ()
-{
-	if (!_last_image) {
-		_last_image.reset (new CompactImage (pixel_format(), native_size()));
-		_last_image->make_black ();
-	}
-
-	emit_video (_last_image, _last_subtitle);
-}
-
-void
-Decoder::emit_video (shared_ptr<Image> image, shared_ptr<Subtitle> sub)
-{
-	TIMING ("Decoder emits %1", _video_frames_out);
-	Video (image, _video_frames_out, sub);
-	++_video_frames_out;
-	_last_image = image;
-	_last_subtitle = sub;
-}
-
-void
-Decoder::process_subtitle (shared_ptr<TimedSubtitle> s)
-{
-	_timed_subtitle = s;
-
-	if (_timed_subtitle && _opt->apply_crop) {
-		Position const p = _timed_subtitle->subtitle()->position ();
-		_timed_subtitle->subtitle()->set_position (Position (p.x - _film->crop().left, p.y - _film->crop().top));
-	}
-}
-
-
-int
-Decoder::bytes_per_audio_sample () const
-{
-	return av_get_bytes_per_sample (audio_sample_format ());
+{
+
 }
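
The patch above removes the A/V length bookkeeping from Decoder. For readers following that logic, here is a small, self-contained sketch (not part of DCP-o-matic; the helper names delay_bytes_for and video_frames_in_audio_frames are invented for illustration) of the two pieces of arithmetic the removed process_begin() and process_end() relied on: converting an audio delay in milliseconds to a byte count for the delay line, and converting a video frame count to the number of audio frames that should accompany it.

#include <cassert>
#include <cstdint>

/* Illustrative helper (hypothetical, not DCP-o-matic API): bytes of interleaved
   audio corresponding to a delay in milliseconds, mirroring the expression in
   the removed Decoder::process_begin().  A negative delay gives a negative
   byte count, which the old code used to mean "drop this much audio". */
static int64_t
delay_bytes_for (int delay_ms, int sample_rate, int channels, int bytes_per_sample)
{
	return int64_t (delay_ms) * sample_rate * channels * bytes_per_sample / 1000;
}

/* Illustrative helper (hypothetical): how many audio frames should accompany
   a given number of video frames, as in the length check in the removed
   Decoder::process_end(). */
static int64_t
video_frames_in_audio_frames (int64_t video_frames, int sample_rate, float frames_per_second)
{
	return int64_t (video_frames * sample_rate / frames_per_second);
}

int
main ()
{
	/* 96 ms of 6-channel, 16-bit, 48kHz audio: 96 * 48000 * 6 * 2 / 1000 bytes */
	assert (delay_bytes_for (96, 48000, 6, 2) == 55296);

	/* 24 video frames at 24 fps should be matched by exactly one second of audio */
	assert (video_frames_in_audio_frames (24, 48000, 24.0) == 48000);

	return 0;
}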