_pts_offset = - c->audio_stream()->first_audio.get();
}
+ /* If _pts_offset is positive we would be pushing things from a -ve PTS to be played.
+ I don't think we ever want to do that, as it seems things at -ve PTS are not meant
+	   to be seen (used for alignment bars etc.); see mantis #418.
+ */
+ if (_pts_offset > ContentTime ()) {
+ _pts_offset = ContentTime ();
+ }
+
/* Now adjust both so that the video pts starts on a frame */
if (have_video && have_audio) {
ContentTime first_video = c->first_video().get() + _pts_offset;
{
int r = av_read_frame (_format_context, &_packet);
- if (r < 0) {
+ /* AVERROR_INVALIDDATA can apparently be returned sometimes even when av_read_frame
+ has pretty-much succeeded (and hence generated data which should be processed).
+ Hence it makes sense to continue here in that case.
+ */
+ if (r < 0 && r != AVERROR_INVALIDDATA) {
if (r != AVERROR_EOF) {
/* Maybe we should fail here, but for now we'll just finish off instead */
char buf[256];
av_strerror (r, buf, sizeof(buf));
LOG_ERROR (N_("error on av_read_frame (%1) (%2)"), buf, r);
}
-
+
flush ();
return true;
}
int const si = _packet.stream_index;
- if (si == _video_stream) {
+ if (si == _video_stream && !_ignore_video) {
decode_video_packet ();
} else if (_ffmpeg_content->audio_stream() && _ffmpeg_content->audio_stream()->uses_index (_format_context, si)) {
decode_audio_packet ();
shared_ptr<AudioBuffers>
FFmpegDecoder::deinterleave_audio (uint8_t** data, int size)
{
- assert (_ffmpeg_content->audio_channels());
- assert (bytes_per_audio_sample());
+ DCPOMATIC_ASSERT (_ffmpeg_content->audio_channels());
+ DCPOMATIC_ASSERT (bytes_per_audio_sample());
/* Deinterleave and convert to float */
- assert ((size % (bytes_per_audio_sample() * _ffmpeg_content->audio_channels())) == 0);
-
+ /* total_samples and frames will be rounded down here, so if there are stray samples at the end
+ of the block that do not form a complete sample or frame they will be dropped.
+ */
int const total_samples = size / bytes_per_audio_sample();
int const frames = total_samples / _ffmpeg_content->audio_channels();
shared_ptr<AudioBuffers> audio (new AudioBuffers (_ffmpeg_content->audio_channels(), frames));
{
VideoDecoder::seek (time, accurate);
AudioDecoder::seek (time, accurate);
-
+
/* If we are doing an `accurate' seek, we need to use pre-roll, as
we don't really know what the seek will give us.
*/
*/
ContentTime const u = time - _pts_offset;
- int64_t s = u.seconds() / av_q2d (_format_context->streams[_video_stream]->time_base);
-
- if (_ffmpeg_content->audio_stream ()) {
- s = min (
- s, int64_t (u.seconds() / av_q2d (_ffmpeg_content->audio_stream()->stream(_format_context)->time_base))
- );
- }
-
- av_seek_frame (_format_context, _video_stream, s, 0);
+ av_seek_frame (_format_context, _video_stream, u.seconds() / av_q2d (_format_context->streams[_video_stream]->time_base), 0);
avcodec_flush_buffers (video_codec_context());
if (audio_codec_context ()) {