X-Git-Url: https://main.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fffmpeg_decoder.cc;h=5240decb24dfd69e74650b31a2de3b13ab8c8f18;hb=6f344b876689a1234a5eb75041882f06f5d9fe5c;hp=3d52448079b449cd69092c476879c69f9886e147;hpb=6aadfd3e775d7c23047dad1f383b8b68ba964782;p=dcpomatic.git

diff --git a/src/lib/ffmpeg_decoder.cc b/src/lib/ffmpeg_decoder.cc
index 3d5244807..5240decb2 100644
--- a/src/lib/ffmpeg_decoder.cc
+++ b/src/lib/ffmpeg_decoder.cc
@@ -72,12 +72,12 @@ using boost::split;
 using dcp::Size;
 
 FFmpegDecoder::FFmpegDecoder (shared_ptr<const FFmpegContent> c, shared_ptr<Log> log, bool fast)
-	: VideoDecoder (c)
+	: VideoDecoder (c->video, log)
 	, AudioDecoder (c, fast)
 	, SubtitleDecoder (c)
 	, FFmpeg (c)
 	, _log (log)
-	, _pts_offset (pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->video_frame_rate()))
+	, _pts_offset (pts_offset (c->ffmpeg_audio_streams(), c->first_video(), c->video->video_frame_rate()))
 {
 
 }
@@ -130,7 +130,7 @@ FFmpegDecoder::pass (PassReason reason, bool accurate)
 		decode_audio_packet ();
 	}
 
-	av_free_packet (&_packet);
+	av_packet_unref (&_packet);
 	return false;
 }
 
@@ -218,6 +218,17 @@ FFmpegDecoder::deinterleave_audio (shared_ptr<FFmpegAudioStream> stream) const
 	}
 	break;
 
+	case AV_SAMPLE_FMT_S32P:
+	{
+		int32_t** p = reinterpret_cast<int32_t **> (_frame->data);
+		for (int i = 0; i < stream->channels(); ++i) {
+			for (int j = 0; j < frames; ++j) {
+				audio->data(i)[j] = static_cast<float> (p[i][j]) / (1 << 31);
+			}
+		}
+	}
+	break;
+
 	case AV_SAMPLE_FMT_FLT:
 	{
 		float* p = reinterpret_cast<float*> (_frame->data[0]);
@@ -403,7 +414,7 @@ FFmpegDecoder::decode_video_packet ()
 			double const pts = i->second * av_q2d (_format_context->streams[_video_stream]->time_base) + _pts_offset.seconds ();
 			video (
 				shared_ptr<ImageProxy> (new RawImageProxy (image)),
-				llrint (pts * _ffmpeg_content->video_frame_rate ())
+				llrint (pts * _ffmpeg_content->video->video_frame_rate ())
 				);
 		} else {
 			LOG_WARNING_NC ("Dropping frame without PTS");
@@ -484,12 +495,21 @@ FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTimeP
 	*/
 	shared_ptr<Image> image (new Image (AV_PIX_FMT_RGBA, dcp::Size (rect->w, rect->h), true));
 
+#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
 	/* Start of the first line in the subtitle */
 	uint8_t* sub_p = rect->pict.data[0];
 	/* sub_p looks up into a BGRA palette which is here
 	   (i.e. first byte B, second G, third R, fourth A)
 	*/
 	uint32_t const * palette = (uint32_t *) rect->pict.data[1];
+#else
+	/* Start of the first line in the subtitle */
+	uint8_t* sub_p = rect->data[0];
+	/* sub_p looks up into a BGRA palette which is here
+	   (i.e. first byte B, second G, third R, fourth A)
+	*/
+	uint32_t const * palette = (uint32_t *) rect->data[1];
+#endif
 	/* And the stream has a map of those palette colours to colours
 	   chosen by the user; created a `mapped' palette from those settings.
 	*/
@@ -520,11 +540,15 @@ FFmpegDecoder::decode_bitmap_subtitle (AVSubtitleRect const * rect, ContentTimeP
 			/* XXX: this seems to be wrong to me (isn't the output image RGBA?)
 			   but it looks right on screen */
 			*out_line_p++ = (p.a << 24) | (p.r << 16) | (p.g << 8) | p.b;
 		}
+#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
 		sub_p += rect->pict.linesize[0];
+#else
+		sub_p += rect->linesize[0];
+#endif
 		out_p += image->stride()[0] / sizeof (uint32_t);
 	}
 
-	dcp::Size const vs = _ffmpeg_content->video_size ();
+	dcp::Size const vs = _ffmpeg_content->video->video_size ();
 	dcpomatic::Rect<double> const scaled_rect (
 		static_cast<double> (rect->x) / vs.width,
 		static_cast<double> (rect->y) / vs.height,
@@ -573,6 +597,7 @@ FFmpegDecoder::decode_ass_subtitle (string ass, ContentTimePeriod period)
 				dcp::SubtitleString (
 					boost::optional<string> (),
 					k.italic,
+					k.bold,
 					dcp::Colour (255, 255, 255),
 					/* 48pt is 1/22nd of the screen height */
 					48,
@@ -586,6 +611,7 @@ FFmpegDecoder::decode_ass_subtitle (string ass, ContentTimePeriod period)
 					*/
 					1.015 - ((1 + highest - j.vertical_position.line.get()) * 1.5 / 22),
 					dcp::VALIGN_TOP,
+					dcp::DIRECTION_LTR,
 					k.text,
 					static_cast<dcp::Effect> (0),
 					dcp::Colour (255, 255, 255),
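A note on the av_free_packet() -> av_packet_unref() change in pass(): av_free_packet() was deprecated in FFmpeg 2.8 and av_packet_unref() is its replacement, dropping the packet's buffer reference and resetting the remaining fields so the packet can be reused. A minimal sketch of the kind of demux loop this sits in, assuming a hypothetical already-opened AVFormatContext (this is not dcpomatic's actual pass() implementation):

extern "C" {
#include <libavformat/avformat.h>
}

/* Read every packet from an opened format context and release each one.
   av_read_frame() gives the packet a reference on every successful call,
   which must be dropped with av_packet_unref() each time round the loop
   (older code used the deprecated av_free_packet() here).
*/
void
drain (AVFormatContext* format_context)
{
	AVPacket packet;
	av_init_packet (&packet);
	packet.data = 0;
	packet.size = 0;

	while (av_read_frame (format_context, &packet) >= 0) {
		/* ... hand the packet to the appropriate decoder ... */
		av_packet_unref (&packet);
	}
}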
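The new AV_SAMPLE_FMT_S32P case converts planar signed 32-bit samples to float in roughly [-1, 1) by dividing by 2^31. A standalone sketch of the same conversion with hypothetical in/out buffers (the real code reads from _frame->data and writes into dcpomatic's per-channel AudioBuffers); it divides by an explicit positive float constant rather than the committed (1 << 31), since shifting 1 into the sign bit of an int is at best implementation-defined:

#include <stdint.h>

/* Convert one channel-plane of signed 32-bit PCM to float.
   `in' and `out' are example buffers of `frames' samples each. */
static void
s32_plane_to_float (int32_t const * in, float* out, int frames)
{
	for (int i = 0; i < frames; ++i) {
		/* 2147483648 = 2^31, the magnitude of the most negative
		   32-bit sample */
		out[i] = static_cast<float> (in[i]) / 2147483648.0f;
	}
}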
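The DCPOMATIC_HAVE_AVSUBTITLERECT_PICT conditionals exist because newer FFmpeg removed the AVSubtitleRect::pict member in favour of top-level data[] and linesize[] fields on the rect itself. A sketch of one way the version split could be kept in a single place instead of at each use site; the helper name is hypothetical, and it assumes the same configure-time macro the diff uses:

extern "C" {
#include <libavcodec/avcodec.h>
}
#include <stdint.h>

/* Hypothetical helper: fetch the bitmap plane, BGRA palette and stride
   of an AVSubtitleRect regardless of FFmpeg version.  Assumes the build
   system defines DCPOMATIC_HAVE_AVSUBTITLERECT_PICT when the old
   AVSubtitleRect::pict member is present.
*/
static void
subtitle_rect_planes (AVSubtitleRect const * rect, uint8_t const ** bitmap, uint32_t const ** palette, int* stride)
{
#ifdef DCPOMATIC_HAVE_AVSUBTITLERECT_PICT
	*bitmap = rect->pict.data[0];
	*palette = reinterpret_cast<uint32_t const *> (rect->pict.data[1]);
	*stride = rect->pict.linesize[0];
#else
	*bitmap = rect->data[0];
	*palette = reinterpret_cast<uint32_t const *> (rect->data[1]);
	*stride = rect->linesize[0];
#endif
}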
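To make the ASS vertical-position expression in decode_ass_subtitle() concrete: per the comments in the code, each 48pt line occupies 1/22nd of the screen height, lines are spaced 1.5 line-heights apart (1.5 / 22 is about 0.068), and the 1.015 lifts the block slightly off the bottom edge. For a two-line subtitle (highest = 1), the bottom line (line 1) is placed at 1.015 - (1 * 1.5 / 22), about 0.947 of the screen height, and the top line (line 0) at 1.015 - (2 * 1.5 / 22), about 0.879, both measured from the top of the screen since the alignment is dcp::VALIGN_TOP.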