using boost::dynamic_pointer_cast;
using dcp::Size;
-FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log)
+FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log, bool fast)
: FFmpeg (c)
, _log (log)
, _have_current_subtitle (false)
{
if (c->audio) {
- audio.reset (new AudioDecoder (this, c->audio, log));
+ audio.reset (new AudioDecoder (this, c->audio, log, fast));
}
if (c->subtitle) {
- subtitle.reset (new SubtitleDecoder (this, c->subtitle, log));
+ /* XXX: this time here should be the time of the first subtitle, not 0 */
+ subtitle.reset (new SubtitleDecoder (this, c->subtitle, log, ContentTime()));
}
_next_time.resize (_format_context->nb_streams);
of the block that do not form a complete sample or frame they will be dropped.
*/
int const total_samples = size / bytes_per_audio_sample (stream);
- int const frames = total_samples / stream->channels();
- shared_ptr<AudioBuffers> audio (new AudioBuffers (stream->channels(), frames));
+ int const channels = stream->channels();
+ int const frames = total_samples / channels;
+ shared_ptr<AudioBuffers> audio (new AudioBuffers (channels, frames));
+ float** data = audio->data();
switch (audio_sample_format (stream)) {
case AV_SAMPLE_FMT_U8:
int sample = 0;
int channel = 0;
for (int i = 0; i < total_samples; ++i) {
- audio->data(channel)[sample] = float(*p++) / (1 << 23);
+ data[channel][sample] = float(*p++) / (1 << 23);
++channel;
- if (channel == stream->channels()) {
+ if (channel == channels) {
channel = 0;
++sample;
}
int sample = 0;
int channel = 0;
for (int i = 0; i < total_samples; ++i) {
- audio->data(channel)[sample] = float(*p++) / (1 << 15);
+ data[channel][sample] = float(*p++) / (1 << 15);
++channel;
- if (channel == stream->channels()) {
+ if (channel == channels) {
channel = 0;
++sample;
}
case AV_SAMPLE_FMT_S16P:
{
int16_t** p = reinterpret_cast<int16_t **> (_frame->data);
- for (int i = 0; i < stream->channels(); ++i) {
+ for (int i = 0; i < channels; ++i) {
for (int j = 0; j < frames; ++j) {
- audio->data(i)[j] = static_cast<float>(p[i][j]) / (1 << 15);
+ data[i][j] = static_cast<float>(p[i][j]) / (1 << 15);
}
}
}
int sample = 0;
int channel = 0;
for (int i = 0; i < total_samples; ++i) {
- audio->data(channel)[sample] = static_cast<float>(*p++) / 2147483648;
+ data[channel][sample] = static_cast<float>(*p++) / 2147483648;
++channel;
- if (channel == stream->channels()) {
+ if (channel == channels) {
channel = 0;
++sample;
}
case AV_SAMPLE_FMT_S32P:
{
int32_t** p = reinterpret_cast<int32_t **> (_frame->data);
- for (int i = 0; i < stream->channels(); ++i) {
+ for (int i = 0; i < channels; ++i) {
for (int j = 0; j < frames; ++j) {
- audio->data(i)[j] = static_cast<float>(p[i][j]) / 2147483648;
+ data[i][j] = static_cast<float>(p[i][j]) / 2147483648;
}
}
}
int sample = 0;
int channel = 0;
for (int i = 0; i < total_samples; ++i) {
- audio->data(channel)[sample] = *p++;
+ data[channel][sample] = *p++;
++channel;
- if (channel == stream->channels()) {
+ if (channel == channels) {
channel = 0;
++sample;
}
float** p = reinterpret_cast<float**> (_frame->data);
/* Sometimes there aren't as many channels in the _frame as in the stream */
for (int i = 0; i < _frame->channels; ++i) {
- memcpy (audio->data(i), p[i], frames * sizeof(float));
+ memcpy (data[i], p[i], frames * sizeof(float));
}
- for (int i = _frame->channels; i < stream->channels(); ++i) {
+ for (int i = _frame->channels; i < channels; ++i) {
audio->make_silent (i);
}
}
_have_current_subtitle = true;
if (sub_period.to) {
_current_subtitle_to = *sub_period.to + _pts_offset;
+ } else {
+ _current_subtitle_to = optional<ContentTime>();
}
for (unsigned int i = 0; i < sub.num_rects; ++i) {
}
}
+ if (_current_subtitle_to) {
+ subtitle->emit_stop (*_current_subtitle_to);
+ }
+
avsubtitle_free (&sub);
}