X-Git-Url: https://main.carlh.net/gitweb/?p=dcpomatic.git;a=blobdiff_plain;f=src%2Flib%2Fimage.cc;h=52e8878add58203b7b474e9468c3b5fb1fa81879;hp=b82cc607249965cfbd8753c48e1421440614588b;hb=e29ce33a36c2e20444d57196defc86d5072bce81;hpb=0f8e8ae6dba273170a5f60a16a14fbe69627dc0e

diff --git a/src/lib/image.cc b/src/lib/image.cc
index b82cc6072..52e8878ad 100644
--- a/src/lib/image.cc
+++ b/src/lib/image.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2012-2016 Carl Hetherington
+    Copyright (C) 2012-2021 Carl Hetherington
 
     This file is part of DCP-o-matic.
 
@@ -18,38 +18,53 @@
 
 */
 
+
 /** @file  src/image.cc
  *  @brief A class to describe a video image.
  */
 
-#include "image.h"
+
+#include "compose.hpp"
+#include "dcpomatic_socket.h"
 #include "exceptions.h"
-#include "timer.h"
+#include "image.h"
 #include "rect.h"
+#include "timer.h"
 #include "util.h"
-#include "dcpomatic_socket.h"
 #include <dcp/rgb_xyz.h>
 #include <dcp/transfer_function.h>
 extern "C" {
-#include <libswscale/swscale.h>
-#include <libavutil/pixfmt.h>
-#include <libavutil/pixdesc.h>
 #include <libavutil/frame.h>
+#include <libavutil/pixdesc.h>
+#include <libavutil/pixfmt.h>
+#include <libswscale/swscale.h>
 }
+#include <png.h>
+#if HAVE_VALGRIND_MEMCHECK_H
+#include <valgrind/memcheck.h>
+#endif
 #include <iostream>
+
 #include "i18n.h"
 
-using std::string;
-using std::min;
-using std::max;
-using std::cout;
 using std::cerr;
+using std::cout;
 using std::list;
+using std::make_shared;
+using std::max;
+using std::min;
 using std::runtime_error;
-using boost::shared_ptr;
+using std::shared_ptr;
+using std::string;
 using dcp::Size;
 
+
+/** The memory alignment, in bytes, used for each row of an image if alignment is requested */
+#define ALIGNMENT 64
+
+
 int
 Image::vertical_factor (int n) const
 {
@@ -57,26 +72,27 @@ Image::vertical_factor (int n) const
 		return 1;
 	}
 
-	AVPixFmtDescriptor const * d = av_pix_fmt_desc_get(_pixel_format);
+	auto d = av_pix_fmt_desc_get(_pixel_format);
 	if (!d) {
 		throw PixelFormatError ("line_factor()", _pixel_format);
 	}
 
-	return pow (2.0f, d->log2_chroma_h);
+	return lrintf(powf(2.0f, d->log2_chroma_h));
 }
 
 int
 Image::horizontal_factor (int n) const
 {
-	int horizontal_factor = 1;
-	if (n > 0) {
-		AVPixFmtDescriptor const * d = av_pix_fmt_desc_get (_pixel_format);
-		if (!d) {
-			throw PixelFormatError ("sample_size()", _pixel_format);
-		}
-		horizontal_factor = pow (2.0f, d->log2_chroma_w);
+	if (n == 0) {
+		return 1;
 	}
-	return horizontal_factor;
+
+	auto d = av_pix_fmt_desc_get(_pixel_format);
+	if (!d) {
+		throw PixelFormatError ("sample_size()", _pixel_format);
+	}
+
+	return lrintf(powf(2.0f, d->log2_chroma_w));
 }
 
 /** @param n Component index.
@@ -86,8 +102,8 @@ dcp::Size
 Image::sample_size (int n) const
 {
 	return dcp::Size (
-		lrint (ceil (static_cast<double>(size().width) / horizontal_factor (n))),
-		lrint (ceil (static_cast<double>(size().height) / vertical_factor (n)))
+		lrint (ceil(static_cast<double>(size().width) / horizontal_factor (n))),
+		lrint (ceil(static_cast<double>(size().height) / vertical_factor (n)))
 	);
 }
 
@@ -95,11 +111,15 @@ Image::sample_size (int n) const
 int
 Image::planes () const
 {
-	AVPixFmtDescriptor const * d = av_pix_fmt_desc_get(_pixel_format);
+	auto d = av_pix_fmt_desc_get(_pixel_format);
 	if (!d) {
 		throw PixelFormatError ("planes()", _pixel_format);
 	}
 
+	if (_pixel_format == AV_PIX_FMT_PAL8) {
+		return 2;
+	}
+
 	if ((d->flags & AV_PIX_FMT_FLAG_PLANAR) == 0) {
 		return 1;
 	}
@@ -107,19 +127,46 @@ Image::planes () const
 	return d->nb_components;
 }
 
+
+static
+int
+round_width_for_subsampling (int p, AVPixFmtDescriptor const * desc)
+{
+	return p & ~ ((1 << desc->log2_chroma_w) - 1);
+}
+
+
+static
+int
+round_height_for_subsampling (int p, AVPixFmtDescriptor const * desc)
+{
+	return p & ~ ((1 << desc->log2_chroma_h) - 1);
+}
+
+
 /** Crop this image, scale it to `inter_size' and then place it in a black frame of `out_size'.
  *  @param crop Amount to crop by.
  *  @param inter_size Size to scale the cropped image to.
  *  @param out_size Size of output frame; if this is larger than inter_size there will be black padding.
  *  @param yuv_to_rgb YUV to RGB transformation to use, if required.
+ *  @param video_range Video range of the image.
  *  @param out_format Output pixel format.
  *  @param out_aligned true to make the output image aligned.
+ *  @param out_video_range Video range to use for the output image.
  *  @param fast Try to be fast at the possible expense of quality; at present this means using
  *  fast bilinear rather than bicubic scaling.
  */
 shared_ptr<Image>
 Image::crop_scale_window (
-	Crop crop, dcp::Size inter_size, dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, bool out_aligned, bool fast
+	Crop crop,
+	dcp::Size inter_size,
+	dcp::Size out_size,
+	dcp::YUVToRGB yuv_to_rgb,
+	VideoRange video_range,
+	AVPixelFormat out_format,
+	VideoRange out_video_range,
+	bool out_aligned,
+	bool fast
 	) const
 {
 	/* Empirical testing suggests that sws_scale() will crash if
@@ -130,36 +177,40 @@ Image::crop_scale_window (
 	DCPOMATIC_ASSERT (out_size.width >= inter_size.width);
 	DCPOMATIC_ASSERT (out_size.height >= inter_size.height);
 
-	/* Here's an image of out_size.  Below we may write to it starting at an offset so we get some padding.
-	   Hence we want to write in the following pattern:
-
-	   block start   write start                                  line end
-	   |..(padding)..|<------line-size------------->|..(padding)..|
-	   |..(padding)..|<------line-size------------->|..(padding)..|
-	   |..(padding)..|<------line-size------------->|..(padding)..|
+	auto out = make_shared<Image>(out_format, out_size, out_aligned);
+	out->make_black ();
 
-	   where line-size is of the smaller (inter_size) image and the full padded line length is that of
-	   out_size.  To get things to work we have to tell FFmpeg that the stride is that of out_size.
-	   However some parts of FFmpeg (notably rgb48Toxyz12 in swscale.c) process data for the full
-	   specified *stride*.
-	   This does not matter until we get to the last line:
+	auto in_desc = av_pix_fmt_desc_get (_pixel_format);
+	if (!in_desc) {
+		throw PixelFormatError ("crop_scale_window()", _pixel_format);
+	}
 
-	   block start                                  write start                    line end
-	   |..(padding)..|<------line-size------------->|XXXwrittenXXX|
-	   |XXXwrittenXXX|<------line-size------------->|XXXwrittenXXX|
-	   |XXXwrittenXXX|<------line-size------------->|XXXwrittenXXXXXXwrittenXXX
-	                                                                  ^^^^ out of bounds
+	/* Round down so that we crop only the number of pixels that is straightforward
+	 * considering any subsampling.
+	 */
+	Crop corrected_crop(
+		round_width_for_subsampling(crop.left, in_desc),
+		round_width_for_subsampling(crop.right, in_desc),
+		round_height_for_subsampling(crop.top, in_desc),
+		round_height_for_subsampling(crop.bottom, in_desc)
+	);
 
-	   To get around this, we ask Image to overallocate its buffers by the overrun.
-	*/
+	/* Also check that we aren't cropping more image than there actually is */
+	if ((corrected_crop.left + corrected_crop.right) >= (size().width - 4)) {
+		corrected_crop.left = 0;
+		corrected_crop.right = size().width - 4;
+	}
 
-	shared_ptr<Image> out (new Image (out_format, out_size, out_aligned, (out_size.width - inter_size.width) / 2));
-	out->make_black ();
+	if ((corrected_crop.top + corrected_crop.bottom) >= (size().height - 4)) {
+		corrected_crop.top = 0;
+		corrected_crop.bottom = size().height - 4;
+	}
 
 	/* Size of the image after any crop */
-	dcp::Size const cropped_size = crop.apply (size ());
+	auto const cropped_size = corrected_crop.apply (size());
 
 	/* Scale context for a scale from cropped_size to inter_size */
-	struct SwsContext* scale_context = sws_getContext (
+	auto scale_context = sws_getContext (
 			cropped_size.width, cropped_size.height, pixel_format(),
 			inter_size.width, inter_size.height, out_format,
 			fast ? SWS_FAST_BILINEAR : SWS_BICUBIC, 0, 0, 0
@@ -169,42 +220,52 @@ Image::crop_scale_window (
 		throw runtime_error (N_("Could not allocate SwsContext"));
 	}
 
-	DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUV_TO_RGB_COUNT);
-	int const lut[dcp::YUV_TO_RGB_COUNT] = {
+	DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUVToRGB::COUNT);
+	int const lut[static_cast<int>(dcp::YUVToRGB::COUNT)] = {
 		SWS_CS_ITU601,
 		SWS_CS_ITU709
 	};
 
+	/* The 3rd parameter here is:
+	       0 -> source range MPEG (i.e. "video", 16-235)
+	       1 -> source range JPEG (i.e. "full", 0-255)
+	   And the 5th:
+	       0 -> destination range MPEG (i.e. "video", 16-235)
+	       1 -> destination range JPEG (i.e. "full", 0-255)
+
+	   But remember: sws_setColorspaceDetails ignores these
+	   parameters unless both the source and destination images
+	   are isYUV or isGray.  (If either is not, it uses video range).
+	*/
 	sws_setColorspaceDetails (
 		scale_context,
-		sws_getCoefficients (lut[yuv_to_rgb]), 0,
-		sws_getCoefficients (lut[yuv_to_rgb]), 0,
+		sws_getCoefficients (lut[static_cast<int>(yuv_to_rgb)]), video_range == VideoRange::VIDEO ? 0 : 1,
+		sws_getCoefficients (lut[static_cast<int>(yuv_to_rgb)]), out_video_range == VideoRange::VIDEO ? 0 : 1,
 		0, 1 << 16, 1 << 16
 		);
 
-	AVPixFmtDescriptor const * desc = av_pix_fmt_desc_get (_pixel_format);
-	if (!desc) {
-		throw PixelFormatError ("crop_scale_window()", _pixel_format);
-	}
-
 	/* Prepare input data pointers with crop */
 	uint8_t* scale_in_data[planes()];
 	for (int c = 0; c < planes(); ++c) {
-		/* To work out the crop in bytes, start by multiplying
-		   the crop by the (average) bytes per pixel.  Then
-		   round down so that we don't crop a subsampled pixel until
-		   we've cropped all of its Y-channel pixels.
-		*/
-		int const x = lrintf (bytes_per_pixel(c) * crop.left) & ~ ((int) desc->log2_chroma_w);
-		scale_in_data[c] = data()[c] + x + stride()[c] * (crop.top / vertical_factor(c));
+		int const x = lrintf(bytes_per_pixel(c) * corrected_crop.left);
+		scale_in_data[c] = data()[c] + x + stride()[c] * (corrected_crop.top / vertical_factor(c));
+	}
+
+	auto out_desc = av_pix_fmt_desc_get (out_format);
+	if (!out_desc) {
+		throw PixelFormatError ("crop_scale_window()", out_format);
 	}
 
 	/* Corner of the image within out_size */
-	Position<int> const corner ((out_size.width - inter_size.width) / 2, (out_size.height - inter_size.height) / 2);
+	Position<int> const corner (
+		round_width_for_subsampling((out_size.width - inter_size.width) / 2, out_desc),
+		round_height_for_subsampling((out_size.height - inter_size.height) / 2, out_desc)
+	);
 
 	uint8_t* scale_out_data[out->planes()];
 	for (int c = 0; c < out->planes(); ++c) {
-		scale_out_data[c] = out->data()[c] + lrintf (out->bytes_per_pixel(c) * corner.x) + out->stride()[c] * corner.y;
+		int const x = lrintf(out->bytes_per_pixel(c) * corner.x);
+		scale_out_data[c] = out->data()[c] + x + out->stride()[c] * (corner.y / out->vertical_factor(c));
 	}
 
 	sws_scale (
@@ -216,9 +277,23 @@ Image::crop_scale_window (
 
 	sws_freeContext (scale_context);
 
+	if (corrected_crop != Crop() && cropped_size == inter_size) {
+		/* We are cropping without any scaling or pixel format conversion, so FFmpeg may have left some
+		   data behind in our image.  Clear it out.  It may get to the point where we should just stop
+		   trying to be clever with cropping.
+		*/
+		out->make_part_black (corner.x + cropped_size.width, out_size.width - cropped_size.width);
+	}
+
 	return out;
 }
 
+shared_ptr<Image>
+Image::convert_pixel_format (dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, bool out_aligned, bool fast) const
+{
+	return scale(size(), yuv_to_rgb, out_format, out_aligned, fast);
+}
+
 /** @param out_size Size to scale to.
  *  @param yuv_to_rgb YUVToRGB transform to use, if required.
  *  @param out_format Output pixel format.
@@ -234,24 +309,34 @@ Image::scale (dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_fo
 	*/
 	DCPOMATIC_ASSERT (aligned ());
 
-	shared_ptr<Image> scaled (new Image (out_format, out_size, out_aligned));
-
-	struct SwsContext* scale_context = sws_getContext (
+	auto scaled = make_shared<Image>(out_format, out_size, out_aligned);
+	auto scale_context = sws_getContext (
 		size().width, size().height, pixel_format(),
 		out_size.width, out_size.height, out_format,
-		fast ? SWS_FAST_BILINEAR : SWS_BICUBIC, 0, 0, 0
+		(fast ? SWS_FAST_BILINEAR : SWS_BICUBIC) | SWS_ACCURATE_RND, 0, 0, 0
 		);
 
-	DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUV_TO_RGB_COUNT);
-	int const lut[dcp::YUV_TO_RGB_COUNT] = {
+	DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUVToRGB::COUNT);
+	int const lut[static_cast<int>(dcp::YUVToRGB::COUNT)] = {
 		SWS_CS_ITU601,
 		SWS_CS_ITU709
 	};
 
+	/* The 3rd parameter here is:
+	       0 -> source range MPEG (i.e. "video", 16-235)
+	       1 -> source range JPEG (i.e. "full", 0-255)
+	   And the 5th:
+	       0 -> destination range MPEG (i.e. "video", 16-235)
+	       1 -> destination range JPEG (i.e. "full", 0-255)
+
+	   But remember: sws_setColorspaceDetails ignores these
+	   parameters unless the corresponding image isYUV or isGray.
+	   (If it's neither, it uses video range).
+	*/
 	sws_setColorspaceDetails (
 		scale_context,
-		sws_getCoefficients (lut[yuv_to_rgb]), 0,
-		sws_getCoefficients (lut[yuv_to_rgb]), 0,
+		sws_getCoefficients (lut[static_cast<int>(yuv_to_rgb)]), 0,
+		sws_getCoefficients (lut[static_cast<int>(yuv_to_rgb)]), 0,
 		0, 1 << 16, 1 << 16
 		);
 
@@ -273,7 +358,7 @@ Image::yuv_16_black (uint16_t v, bool alpha)
 {
 	memset (data()[0], 0, sample_size(0).height * stride()[0]);
 	for (int i = 1; i < 3; ++i) {
-		int16_t* p = reinterpret_cast<int16_t*> (data()[i]);
+		auto p = reinterpret_cast<int16_t*> (data()[i]);
 		int const lines = sample_size(i).height;
 		for (int y = 0; y < lines; ++y) {
 			/* We divide by 2 here because we are writing 2 bytes at a time */
@@ -295,6 +380,36 @@ Image::swap_16 (uint16_t v)
 	return ((v >> 8) & 0xff) | ((v & 0xff) << 8);
 }
 
+void
+Image::make_part_black (int x, int w)
+{
+	switch (_pixel_format) {
+	case AV_PIX_FMT_RGB24:
+	case AV_PIX_FMT_ARGB:
+	case AV_PIX_FMT_RGBA:
+	case AV_PIX_FMT_ABGR:
+	case AV_PIX_FMT_BGRA:
+	case AV_PIX_FMT_RGB555LE:
+	case AV_PIX_FMT_RGB48LE:
+	case AV_PIX_FMT_RGB48BE:
+	case AV_PIX_FMT_XYZ12LE:
+	{
+		int const h = sample_size(0).height;
+		int const bpp = bytes_per_pixel(0);
+		int const s = stride()[0];
+		uint8_t* p = data()[0];
+		for (int y = 0; y < h; y++) {
+			memset (p + x * bpp, 0, w * bpp);
+			p += s;
+		}
+		break;
+	}
+
+	default:
+		throw PixelFormatError ("make_part_black()", _pixel_format);
+	}
+}
+
 void
 Image::make_black ()
 {
@@ -422,48 +537,21 @@ Image::make_black ()
 void
 Image::make_transparent ()
 {
-	if (_pixel_format != AV_PIX_FMT_RGBA) {
+	if (_pixel_format != AV_PIX_FMT_BGRA && _pixel_format != AV_PIX_FMT_RGBA) {
 		throw PixelFormatError ("make_transparent()", _pixel_format);
 	}
 
 	memset (data()[0], 0, sample_size(0).height * stride()[0]);
 }
 
-template <class T>
-void
-component (
-	int n,
-	Image* base,
-	shared_ptr<const Image> other,
-	shared_ptr<const Image> rgba,
-	int start_base_x, int start_base_y,
-	int start_other_x, int start_other_y
-	)
-{
-	dcp::Size const base_size = base->sample_size(n);
-	dcp::Size const other_size = other->sample_size(n);
-	for (int by = start_base_y, oy = start_other_y; by < base_size.height && oy < other_size.height; ++by, ++oy) {
-		/* base image */
-		T* bp = ((T*) (base->data()[n] + by * base->stride()[n])) + start_base_x;
-		/* overlay image */
-		T* op = ((T*) (other->data()[n] + oy * other->stride()[n]));
-		/* original RGBA for alpha channel */
-		uint8_t* rp = rgba->data()[0] + oy * rgba->stride()[0];
-		for (int bx = start_base_x, ox = start_other_x; bx < base_size.width && ox < other_size.width; ++bx, ++ox) {
-			float const alpha = float (rp[3]) / 255;
-			*bp = *op * alpha + *bp * (1 - alpha);
-			++bp;
-			++op;
-			rp += 4;
-		}
-	}
-}
-
 void
 Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
 {
-	/* We're blending RGBA images; first byte is blue, second byte is green, third byte blue, fourth byte alpha */
-	DCPOMATIC_ASSERT (other->pixel_format() == AV_PIX_FMT_RGBA);
+	/* We're blending RGBA or BGRA images */
+	DCPOMATIC_ASSERT (other->pixel_format() == AV_PIX_FMT_BGRA || other->pixel_format() == AV_PIX_FMT_RGBA);
+	int const blue = other->pixel_format() == AV_PIX_FMT_BGRA ? 0 : 2;
+	int const red = other->pixel_format() == AV_PIX_FMT_BGRA ? 2 : 0;
+
 	int const other_bpp = 4;
 
 	int start_tx = position.x;
@@ -492,9 +580,9 @@ Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
 			uint8_t* op = other->data()[0] + oy * other->stride()[0];
 			for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) {
 				float const alpha = float (op[3]) / 255;
-				tp[0] = op[2] * alpha + tp[0] * (1 - alpha);
+				tp[0] = op[red] * alpha + tp[0] * (1 - alpha);
 				tp[1] = op[1] * alpha + tp[1] * (1 - alpha);
-				tp[2] = op[0] * alpha + tp[2] * (1 - alpha);
+				tp[2] = op[blue] * alpha + tp[2] * (1 - alpha);
 
 				tp += this_bpp;
 				op += other_bpp;
@@ -503,6 +591,24 @@ Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
 		break;
 	}
 	case AV_PIX_FMT_BGRA:
+	{
+		int const this_bpp = 4;
+		for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) {
+			uint8_t* tp = data()[0] + ty * stride()[0] + start_tx * this_bpp;
+			uint8_t* op = other->data()[0] + oy * other->stride()[0];
+			for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) {
+				float const alpha = float (op[3]) / 255;
+				tp[0] = op[blue] * alpha + tp[0] * (1 - alpha);
+				tp[1] = op[1] * alpha + tp[1] * (1 - alpha);
+				tp[2] = op[red] * alpha + tp[2] * (1 - alpha);
+				tp[3] = op[3] * alpha + tp[3] * (1 - alpha);
+
+				tp += this_bpp;
+				op += other_bpp;
+			}
+		}
+		break;
+	}
 	case AV_PIX_FMT_RGBA:
 	{
 		int const this_bpp = 4;
@@ -511,9 +617,9 @@ Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
 			uint8_t* op = other->data()[0] + oy * other->stride()[0];
 			for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) {
 				float const alpha = float (op[3]) / 255;
-				tp[0] = op[0] * alpha + tp[0] * (1 - alpha);
+				tp[0] = op[red] * alpha + tp[0] * (1 - alpha);
 				tp[1] = op[1] * alpha + tp[1] * (1 - alpha);
-				tp[2] = op[2] * alpha + tp[2] * (1 - alpha);
+				tp[2] = op[blue] * alpha + tp[2] * (1 - alpha);
 				tp[3] = op[3] * alpha + tp[3] * (1 - alpha);
 
 				tp += this_bpp;
@@ -530,10 +636,10 @@ Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
 			uint8_t* op = other->data()[0] + oy * other->stride()[0];
 			for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) {
 				float const alpha = float (op[3]) / 255;
-				/* Blend high bytes; the RGBA in op appears to be BGRA */
-				tp[1] = op[2] * alpha + tp[1] * (1 - alpha);
+				/* Blend high bytes */
+				tp[1] = op[red] * alpha + tp[1] * (1 - alpha);
 				tp[3] = op[1] * alpha + tp[3] * (1 - alpha);
-				tp[5] = op[0] * alpha + tp[5] * (1 - alpha);
+				tp[5] = op[blue] * alpha + tp[5] * (1 - alpha);
 
 				tp += this_bpp;
 				op += other_bpp;
@@ -543,7 +649,7 @@ Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
 	}
 	case AV_PIX_FMT_XYZ12LE:
 	{
-		dcp::ColourConversion conv = dcp::ColourConversion::srgb_to_xyz();
+		auto conv = dcp::ColourConversion::srgb_to_xyz();
 		double fast_matrix[9];
 		dcp::combined_rgb_to_xyz (conv, fast_matrix);
 		double const * lut_in = conv.in()->lut (8, false);
@@ -556,9 +662,9 @@ Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
 				float const alpha = float (op[3]) / 255;
 
 				/* Convert sRGB to XYZ; op is BGRA.
 				   First, input gamma LUT */
-				double const r = lut_in[op[2]];
+				double const r = lut_in[op[red]];
 				double const g = lut_in[op[1]];
-				double const b = lut_in[op[0]];
+				double const b = lut_in[op[blue]];
 
 				/* RGB to XYZ, including Bradford transform and DCI companding */
 				double const x = max (0.0, min (65535.0, r * fast_matrix[0] + g * fast_matrix[1] + b * fast_matrix[2]));
@@ -578,18 +684,105 @@ Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
 	}
 	case AV_PIX_FMT_YUV420P:
 	{
-		shared_ptr<Image> yuv = other->scale (other->size(), dcp::YUV_TO_RGB_REC709, _pixel_format, false, false);
-		component<uint8_t> (0, this, yuv, other, start_tx, start_ty, start_ox, start_oy);
-		component<uint8_t> (1, this, yuv, other, start_tx, start_ty, start_ox, start_oy);
-		component<uint8_t> (2, this, yuv, other, start_tx, start_ty, start_ox, start_oy);
+		auto yuv = other->convert_pixel_format (dcp::YUVToRGB::REC709, _pixel_format, false, false);
+		dcp::Size const ts = size();
+		dcp::Size const os = yuv->size();
+		for (int ty = start_ty, oy = start_oy; ty < ts.height && oy < os.height; ++ty, ++oy) {
+			int const hty = ty / 2;
+			int const hoy = oy / 2;
+			uint8_t* tY = data()[0] + (ty * stride()[0]) + start_tx;
+			uint8_t* tU = data()[1] + (hty * stride()[1]) + start_tx / 2;
+			uint8_t* tV = data()[2] + (hty * stride()[2]) + start_tx / 2;
+			uint8_t* oY = yuv->data()[0] + (oy * yuv->stride()[0]) + start_ox;
+			uint8_t* oU = yuv->data()[1] + (hoy * yuv->stride()[1]) + start_ox / 2;
+			uint8_t* oV = yuv->data()[2] + (hoy * yuv->stride()[2]) + start_ox / 2;
+			uint8_t* alpha = other->data()[0] + (oy * other->stride()[0]) + start_ox * 4;
+			for (int tx = start_tx, ox = start_ox; tx < ts.width && ox < os.width; ++tx, ++ox) {
+				float const a = float(alpha[3]) / 255;
+				*tY = *oY * a + *tY * (1 - a);
+				*tU = *oU * a + *tU * (1 - a);
+				*tV = *oV * a + *tV * (1 - a);
+				++tY;
+				++oY;
+				if (tx % 2) {
+					++tU;
+					++tV;
+				}
+				if (ox % 2) {
+					++oU;
+					++oV;
+				}
+				alpha += 4;
+			}
+		}
 		break;
 	}
 	case AV_PIX_FMT_YUV420P10:
 	{
-		shared_ptr<Image> yuv = other->scale (other->size(), dcp::YUV_TO_RGB_REC709, _pixel_format, false, false);
-		component<uint16_t> (0, this, yuv, other, start_tx, start_ty, start_ox, start_oy);
-		component<uint16_t> (1, this, yuv, other, start_tx, start_ty, start_ox, start_oy);
-		component<uint16_t> (2, this, yuv, other, start_tx, start_ty, start_ox, start_oy);
+		auto yuv = other->convert_pixel_format (dcp::YUVToRGB::REC709, _pixel_format, false, false);
+		dcp::Size const ts = size();
+		dcp::Size const os = yuv->size();
+		for (int ty = start_ty, oy = start_oy; ty < ts.height && oy < os.height; ++ty, ++oy) {
+			int const hty = ty / 2;
+			int const hoy = oy / 2;
+			uint16_t* tY = ((uint16_t *) (data()[0] + (ty * stride()[0]))) + start_tx;
+			uint16_t* tU = ((uint16_t *) (data()[1] + (hty * stride()[1]))) + start_tx / 2;
+			uint16_t* tV = ((uint16_t *) (data()[2] + (hty * stride()[2]))) + start_tx / 2;
+			uint16_t* oY = ((uint16_t *) (yuv->data()[0] + (oy * yuv->stride()[0]))) + start_ox;
+			uint16_t* oU = ((uint16_t *) (yuv->data()[1] + (hoy * yuv->stride()[1]))) + start_ox / 2;
+			uint16_t* oV = ((uint16_t *) (yuv->data()[2] + (hoy * yuv->stride()[2]))) + start_ox / 2;
+			uint8_t* alpha = other->data()[0] + (oy * other->stride()[0]) + start_ox * 4;
+			for (int tx = start_tx, ox = start_ox; tx < ts.width && ox < os.width; ++tx, ++ox) {
+				float const a = float(alpha[3]) / 255;
+				*tY = *oY * a + *tY * (1 - a);
+				*tU = *oU * a + *tU * (1 - a);
+				*tV = *oV * a + *tV * (1 - a);
+				++tY;
+				++oY;
+				if (tx % 2) {
+					++tU;
+					++tV;
+				}
+				if (ox % 2) {
+					++oU;
+					++oV;
+				}
+				alpha += 4;
+			}
+		}
 		break;
 	}
+	case AV_PIX_FMT_YUV422P10LE:
+	{
+		auto yuv = other->convert_pixel_format (dcp::YUVToRGB::REC709, _pixel_format, false, false);
+		dcp::Size const ts = size();
+		dcp::Size const os = yuv->size();
+		for (int ty = start_ty, oy = start_oy; ty < ts.height && oy < os.height; ++ty, ++oy) {
+			uint16_t* tY = ((uint16_t *) (data()[0] + (ty * stride()[0]))) + start_tx;
+			uint16_t* tU = ((uint16_t *) (data()[1] + (ty * stride()[1]))) + start_tx / 2;
+			uint16_t* tV = ((uint16_t *) (data()[2] + (ty * stride()[2]))) + start_tx / 2;
+			uint16_t* oY = ((uint16_t *) (yuv->data()[0] + (oy * yuv->stride()[0]))) + start_ox;
+			uint16_t* oU = ((uint16_t *) (yuv->data()[1] + (oy * yuv->stride()[1]))) + start_ox / 2;
+			uint16_t* oV = ((uint16_t *) (yuv->data()[2] + (oy * yuv->stride()[2]))) + start_ox / 2;
+			uint8_t* alpha = other->data()[0] + (oy * other->stride()[0]) + start_ox * 4;
+			for (int tx = start_tx, ox = start_ox; tx < ts.width && ox < os.width; ++tx, ++ox) {
+				float const a = float(alpha[3]) / 255;
+				*tY = *oY * a + *tY * (1 - a);
+				*tU = *oU * a + *tU * (1 - a);
+				*tV = *oV * a + *tV * (1 - a);
+				++tY;
+				++oY;
+				if (tx % 2) {
+					++tU;
+					++tV;
+				}
+				if (ox % 2) {
+					++oU;
+					++oV;
+				}
+				alpha += 4;
+			}
+		}
+		break;
+	}
 	default:
@@ -641,7 +834,7 @@ Image::write_to_socket (shared_ptr<Socket> socket) const
 float
 Image::bytes_per_pixel (int c) const
 {
-	AVPixFmtDescriptor const * d = av_pix_fmt_desc_get(_pixel_format);
+	auto d = av_pix_fmt_desc_get(_pixel_format);
 	if (!d) {
 		throw PixelFormatError ("bytes_per_pixel()", _pixel_format);
 	}
@@ -689,14 +882,12 @@ Image::bytes_per_pixel (int c) const
  *
 *  @param p Pixel format.
 *  @param s Size in pixels.
- *  @param aligned true to make each row of this image aligned to a 32-byte boundary.
- *  @param extra_pixels Amount of extra "run-off" memory to allocate at the end of each plane in pixels.
+ *  @param aligned true to make each row of this image aligned to an ALIGNMENT-byte boundary.
 */
-Image::Image (AVPixelFormat p, dcp::Size s, bool aligned, int extra_pixels)
+Image::Image (AVPixelFormat p, dcp::Size s, bool aligned)
	: _size (s)
	, _pixel_format (p)
	, _aligned (aligned)
-	, _extra_pixels (extra_pixels)
 {
	allocate ();
 }
@@ -715,7 +906,7 @@ Image::allocate ()
 
 	for (int i = 0; i < planes(); ++i) {
 		_line_size[i] = ceil (_size.width * bytes_per_pixel(i));
-		_stride[i] = stride_round_up (i, _line_size, _aligned ? 32 : 1);
+		_stride[i] = stride_round_up (i, _line_size, _aligned ? ALIGNMENT : 1);
 
 		/* The assembler function ff_rgb24ToY_avx (in libswscale/x86/input.asm)
 		   uses a 16-byte fetch to read three bytes (R/G/B) of image data.
@@ -728,18 +919,47 @@ Image::allocate ()
 
 		   Further to the above, valgrind is now telling me that
 		   ff_rgb24ToY_ssse3 over-reads by more than _avx.  I can't follow
 		   the code to work out how much,
-		   so I'll just over-allocate by 32 bytes and have done with it.  Empirical
+		   so I'll just over-allocate by ALIGNMENT bytes and have done with it.  Empirical
 		   testing suggests that it works.
+
+		   In addition to these concerns, we may read/write as much as a whole extra line
+		   at the end of each plane in cases where we are messing with offsets in order to
+		   do pad or crop.  To solve this we over-allocate by an extra _stride[i] bytes.
+
+		   As an example: we may write to images starting at an offset so we get some padding.
+		   Hence we want to write in the following pattern:
+
+		   block start   write start                                  line end
+		   |..(padding)..|<------line-size------------->|..(padding)..|
+		   |..(padding)..|<------line-size------------->|..(padding)..|
+		   |..(padding)..|<------line-size------------->|..(padding)..|
+
+		   where line-size is of the smaller (inter_size) image and the full padded line length is that of
+		   out_size.  To get things to work we have to tell FFmpeg that the stride is that of out_size.
+		   However some parts of FFmpeg (notably rgb48Toxyz12 in swscale.c) process data for the full
+		   specified *stride*.  This does not matter until we get to the last line:
+
+		   block start                                  write start                    line end
+		   |..(padding)..|<------line-size------------->|XXXwrittenXXX|
+		   |XXXwrittenXXX|<------line-size------------->|XXXwrittenXXX|
+		   |XXXwrittenXXX|<------line-size------------->|XXXwrittenXXXXXXwrittenXXX
+		                                                                  ^^^^ out of bounds
 		*/
-		_data[i] = (uint8_t *) wrapped_av_malloc (_stride[i] * sample_size(i).height + _extra_pixels * bytes_per_pixel(i) + 32);
+		_data[i] = (uint8_t *) wrapped_av_malloc (_stride[i] * (sample_size(i).height + 1) + ALIGNMENT);
+
+#if HAVE_VALGRIND_MEMCHECK_H
+		/* The data between the end of the line size and the stride is undefined but processed by
+		   libswscale, causing lots of valgrind errors.  Mark it all defined to quell these errors.
+		*/
+		VALGRIND_MAKE_MEM_DEFINED (_data[i], _stride[i] * (sample_size(i).height + 1) + ALIGNMENT);
+#endif
 	}
 }
 
 Image::Image (Image const & other)
-	: _size (other._size)
+	: std::enable_shared_from_this<Image>(other)
+	, _size (other._size)
 	, _pixel_format (other._pixel_format)
 	, _aligned (other._aligned)
-	, _extra_pixels (other._extra_pixels)
 {
 	allocate ();
 
@@ -757,10 +977,11 @@ Image::Image (Image const & other)
 
 Image::Image (AVFrame* frame)
 	: _size (frame->width, frame->height)
-	, _pixel_format (static_cast<AVPixelFormat> (frame->format))
+	, _pixel_format (static_cast<AVPixelFormat>(frame->format))
 	, _aligned (true)
-	, _extra_pixels (0)
 {
+	DCPOMATIC_ASSERT (_pixel_format != AV_PIX_FMT_NONE);
+
 	allocate ();
 
 	for (int i = 0; i < planes(); ++i) {
@@ -780,7 +1001,6 @@ Image::Image (shared_ptr<const Image> other, bool aligned)
 	: _size (other->_size)
 	, _pixel_format (other->_pixel_format)
 	, _aligned (aligned)
-	, _extra_pixels (other->_extra_pixels)
 {
 	allocate ();
 
@@ -822,10 +1042,8 @@ Image::swap (Image & other)
 	}
 
 	std::swap (_aligned, other._aligned);
-	std::swap (_extra_pixels, other._extra_pixels);
 }
 
-/** Destroy a Image */
 Image::~Image ()
 {
 	for (int i = 0; i < planes(); ++i) {
@@ -867,11 +1085,12 @@ Image::aligned () const
 	return _aligned;
 }
 
+
 PositionImage
 merge (list<PositionImage> images)
 {
 	if (images.empty ()) {
-		return PositionImage ();
+		return {};
 	}
 
 	if (images.size() == 1) {
@@ -879,19 +1098,20 @@ merge (list<PositionImage> images)
 	}
 
 	dcpomatic::Rect<int> all (images.front().position, images.front().image->size().width, images.front().image->size().height);
-	for (list<PositionImage>::const_iterator i = images.begin(); i != images.end(); ++i) {
-		all.extend (dcpomatic::Rect<int> (i->position, i->image->size().width, i->image->size().height));
+	for (auto const& i: images) {
+		all.extend (dcpomatic::Rect<int>(i.position, i.image->size().width, i.image->size().height));
 	}
 
-	shared_ptr<Image> merged (new Image (images.front().image->pixel_format (), dcp::Size (all.width, all.height), true));
+	auto merged = make_shared<Image>(images.front().image->pixel_format(), dcp::Size(all.width, all.height), true);
 	merged->make_transparent ();
-	for (list<PositionImage>::const_iterator i = images.begin(); i != images.end(); ++i) {
-		merged->alpha_blend (i->image, i->position - all.position());
+	for (auto const& i: images) {
+		merged->alpha_blend (i.image, i.position - all.position());
 	}
 
 	return PositionImage (merged, all.position ());
 }
 
+
 bool
 operator== (Image const & a, Image const & b)
 {
@@ -926,49 +1146,61 @@ operator== (Image const & a, Image const & b)
 void
 Image::fade (float f)
 {
+	/* U/V black value for 8-bit colour */
+	static int const eight_bit_uv = (1 << 7) - 1;
+	/* U/V black value for 10-bit colour */
+	static uint16_t const ten_bit_uv = (1 << 9) - 1;
+
 	switch (_pixel_format) {
 	case AV_PIX_FMT_YUV420P:
-	case AV_PIX_FMT_YUV422P:
-	case AV_PIX_FMT_YUV444P:
-	case AV_PIX_FMT_YUV411P:
-	case AV_PIX_FMT_YUVJ420P:
-	case AV_PIX_FMT_YUVJ422P:
-	case AV_PIX_FMT_YUVJ444P:
-	case AV_PIX_FMT_RGB24:
-	case AV_PIX_FMT_ARGB:
-	case AV_PIX_FMT_RGBA:
-	case AV_PIX_FMT_ABGR:
-	case AV_PIX_FMT_BGRA:
-	case AV_PIX_FMT_RGB555LE:
-		/* 8-bit */
-		for (int c = 0; c < 3; ++c) {
+	{
+		/* Y */
+		uint8_t* p = data()[0];
+		int const lines = sample_size(0).height;
+		for (int y = 0; y < lines; ++y) {
+			uint8_t* q = p;
+			for (int x = 0; x < line_size()[0]; ++x) {
+				*q = int(float(*q) * f);
+				++q;
+			}
+			p += stride()[0];
+		}
+
+		/* U, V */
+		for (int c = 1; c < 3; ++c) {
 			uint8_t* p = data()[c];
 			int const lines = sample_size(c).height;
 			for (int y = 0; y < lines; ++y) {
 				uint8_t* q = p;
 				for (int x = 0; x < line_size()[c]; ++x) {
-					*q = int (float (*q) * f);
+					*q = eight_bit_uv + int((int(*q) - eight_bit_uv) * f);
 					++q;
 				}
 				p += stride()[c];
 			}
 		}
+
 		break;
+	}
+
+	case AV_PIX_FMT_RGB24:
+	{
+		/* 8-bit */
+		uint8_t* p = data()[0];
+		int const lines = sample_size(0).height;
+		for (int y = 0; y < lines; ++y) {
+			uint8_t* q = p;
+			for (int x = 0; x < line_size()[0]; ++x) {
+				*q = int (float (*q) * f);
+				++q;
+			}
+			p += stride()[0];
+		}
+		break;
+	}
 
-	case AV_PIX_FMT_YUV422P9LE:
-	case AV_PIX_FMT_YUV444P9LE:
-	case AV_PIX_FMT_YUV422P10LE:
-	case AV_PIX_FMT_YUV444P10LE:
-	case AV_PIX_FMT_YUV422P16LE:
-	case AV_PIX_FMT_YUV444P16LE:
-	case AV_PIX_FMT_YUVA420P9LE:
-	case AV_PIX_FMT_YUVA422P9LE:
-	case AV_PIX_FMT_YUVA444P9LE:
-	case AV_PIX_FMT_YUVA420P10LE:
-	case AV_PIX_FMT_YUVA422P10LE:
-	case AV_PIX_FMT_YUVA444P10LE:
-	case AV_PIX_FMT_RGB48LE:
 	case AV_PIX_FMT_XYZ12LE:
+	case AV_PIX_FMT_RGB48LE:
 		/* 16-bit little-endian */
 		for (int c = 0; c < 3; ++c) {
 			int const stride_pixels = stride()[c] / 2;
@@ -986,22 +1218,26 @@ Image::fade (float f)
 		}
 		break;
 
-	case AV_PIX_FMT_YUV422P9BE:
-	case AV_PIX_FMT_YUV444P9BE:
-	case AV_PIX_FMT_YUV444P10BE:
-	case AV_PIX_FMT_YUV422P10BE:
-	case AV_PIX_FMT_YUVA420P9BE:
-	case AV_PIX_FMT_YUVA422P9BE:
-	case AV_PIX_FMT_YUVA444P9BE:
-	case AV_PIX_FMT_YUVA420P10BE:
-	case AV_PIX_FMT_YUVA422P10BE:
-	case AV_PIX_FMT_YUVA444P10BE:
-	case AV_PIX_FMT_YUVA420P16BE:
-	case AV_PIX_FMT_YUVA422P16BE:
-	case AV_PIX_FMT_YUVA444P16BE:
-	case AV_PIX_FMT_RGB48BE:
-		/* 16-bit big-endian */
-		for (int c = 0; c < 3; ++c) {
+	case AV_PIX_FMT_YUV422P10LE:
+	{
+		/* Y */
+		{
+			int const stride_pixels = stride()[0] / 2;
+			int const line_size_pixels = line_size()[0] / 2;
+			uint16_t* p = reinterpret_cast<uint16_t*> (data()[0]);
+			int const lines = sample_size(0).height;
+			for (int y = 0; y < lines; ++y) {
+				uint16_t* q = p;
+				for (int x = 0; x < line_size_pixels; ++x) {
+					*q = int(float(*q) * f);
+					++q;
+				}
+				p += stride_pixels;
+			}
+		}
+
+		/* U, V */
+		for (int c = 1; c < 3; ++c) {
 			int const stride_pixels = stride()[c] / 2;
 			int const line_size_pixels = line_size()[c] / 2;
 			uint16_t* p = reinterpret_cast<uint16_t*> (data()[c]);
 			int const lines = sample_size(c).height;
 			for (int y = 0; y < lines; ++y) {
 				uint16_t* q = p;
 				for (int x = 0; x < line_size_pixels; ++x) {
-					*q = swap_16 (int (float (swap_16 (*q)) * f));
+					*q = ten_bit_uv + int((int(*q) - ten_bit_uv) * f);
 					++q;
 				}
 				p += stride_pixels;
 			}
 		}
 		break;
-
-	case AV_PIX_FMT_UYVY422:
-	{
-		int const Y = sample_size(0).height;
-		int const X = line_size()[0];
+	}
+
+	default:
+		throw PixelFormatError ("fade()", _pixel_format);
+	}
+}
+
+shared_ptr<Image>
+Image::ensure_aligned (shared_ptr<const Image> image)
+{
+	if (image->aligned()) {
+		return image;
+	}
+
+	return make_shared<Image>(image, true);
+}
+
+size_t
+Image::memory_used () const
+{
+	size_t m = 0;
+	for (int i = 0; i < planes(); ++i) {
+		m += _stride[i] * sample_size(i).height;
+	}
+	return m;
+}
+
+class Memory
+{
+public:
+	Memory ()
+		: data(0)
+		, size(0)
+	{}
+
+	~Memory ()
+	{
+		free (data);
+	}
+
+	uint8_t* data;
+	size_t size;
+};
+
+static void
+png_write_data (png_structp png_ptr, png_bytep data, png_size_t length)
+{
+	auto mem = reinterpret_cast<Memory*>(png_get_io_ptr(png_ptr));
+	size_t size = mem->size + length;
+
+	if (mem->data) {
+		mem->data = reinterpret_cast<uint8_t*>(realloc(mem->data, size));
+	} else {
+		mem->data = reinterpret_cast<uint8_t*>(malloc(size));
+	}
+
+	if (!mem->data) {
+		throw EncodeError (N_("could not allocate memory for PNG"));
+	}
+
+	memcpy (mem->data + mem->size, data, length);
+	mem->size += length;
+}
+
+static void
+png_flush (png_structp)
+{
+
+}
+
+static void
+png_error_fn (png_structp png_ptr, char const * message)
+{
+	reinterpret_cast<Image*>(png_get_error_ptr(png_ptr))->png_error (message);
+}
+
+void
+Image::png_error (char const * message)
+{
+	throw EncodeError (String::compose ("Error during PNG write: %1", message));
+}
+
+dcp::ArrayData
+Image::as_png () const
+{
+	DCPOMATIC_ASSERT (bytes_per_pixel(0) == 4);
+	DCPOMATIC_ASSERT (planes() == 1);
+	if (pixel_format() != AV_PIX_FMT_RGBA) {
+		return convert_pixel_format(dcp::YUVToRGB::REC709, AV_PIX_FMT_RGBA, true, false)->as_png();
+	}
+
+	/* error handling? */
+	png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, reinterpret_cast<void*>(const_cast<Image*>(this)), png_error_fn, 0);
+	if (!png_ptr) {
+		throw EncodeError (N_("could not create PNG write struct"));
+	}
+
+	Memory state;
+
+	png_set_write_fn (png_ptr, &state, png_write_data, png_flush);
+
+	png_infop info_ptr = png_create_info_struct(png_ptr);
+	if (!info_ptr) {
+		png_destroy_write_struct (&png_ptr, &info_ptr);
+		throw EncodeError (N_("could not create PNG info struct"));
+	}
+
+	png_set_IHDR (png_ptr, info_ptr, size().width, size().height, 8, PNG_COLOR_TYPE_RGBA, PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);
+
+	png_byte ** row_pointers = reinterpret_cast<png_byte **>(png_malloc(png_ptr, size().height * sizeof(png_byte *)));
+	for (int i = 0; i < size().height; ++i) {
+		row_pointers[i] = (png_byte *) (data()[0] + i * stride()[0]);
+	}
+
+	png_write_info (png_ptr, info_ptr);
+	png_write_image (png_ptr, row_pointers);
+	png_write_end (png_ptr, info_ptr);
+
+	png_destroy_write_struct (&png_ptr, &info_ptr);
+	png_free (png_ptr, row_pointers);
+
+	return dcp::ArrayData (state.data, state.size);
+}
+
+
+void
+Image::video_range_to_full_range ()
+{
+	switch (_pixel_format) {
+	case AV_PIX_FMT_RGB24:
+	{
+		float const factor = 256.0 / 219.0;
 		uint8_t* p = data()[0];
-		for (int y = 0; y < Y; ++y) {
-			for (int x = 0; x < X; ++x) {
-				*p = int (float (*p) * f);
-				++p;
-			}
+		int const lines = sample_size(0).height;
+		for (int y = 0; y < lines; ++y) {
+			uint8_t* q = p;
+			for (int x = 0; x < line_size()[0]; ++x) {
+				*q = int((*q - 16) * factor);
+				++q;
+			}
+			p += stride()[0];
+		}
+		break;
+	}
+	case AV_PIX_FMT_GBRP12LE:
+	{
+		float const factor = 4096.0 / 3504.0;
+		for (int c = 0; c < 3; ++c) {
+			uint16_t* p = reinterpret_cast<uint16_t*>(data()[c]);
+			int const lines = sample_size(c).height;
+			for (int y = 0; y < lines; ++y) {
+				uint16_t* q = p;
+				int const line_size_pixels = line_size()[c] / 2;
+				for (int x = 0; x < line_size_pixels; ++x) {
+					*q = int((*q - 256) * factor);
+					++q;
+				}
+			}
 		}
 		break;
 	}
-
 	default:
-		throw PixelFormatError ("fade()", _pixel_format);
+		throw PixelFormatError ("video_range_to_full_range()", _pixel_format);
 	}
 }
+
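
The patch leans on two small pieces of arithmetic that are easy to check in isolation: the bit-mask rounding used by round_width_for_subsampling()/round_height_for_subsampling(), and the 8-bit video-to-full-range expansion in the AV_PIX_FMT_RGB24 case of video_range_to_full_range(). The standalone sketch below is not part of the patch: it takes the subsampling shift as a plain int rather than reading it from an AVPixFmtDescriptor, and the clamp in video_to_full() is an extra safety measure added here, not something the patch itself does.

#include <cstdint>
#include <cstdio>

/* Round p down to a multiple of the chroma subsampling factor
   (1 << log2_chroma), as round_width_for_subsampling() does with
   desc->log2_chroma_w.  Cropping and offsetting only on these
   boundaries means a shared U/V sample is never split. */
static int
round_for_subsampling (int p, int log2_chroma)
{
	return p & ~((1 << log2_chroma) - 1);
}

/* Expand an 8-bit "video" range value (16-235) to "full" range (0-255)
   with the same 256/219 factor as video_range_to_full_range().  The
   clamp is added here so that out-of-range inputs cannot wrap; the
   patch writes the scaled value straight back. */
static uint8_t
video_to_full (uint8_t v)
{
	float const factor = 256.0 / 219.0;
	int const expanded = int((v - 16) * factor);
	return static_cast<uint8_t>(expanded < 0 ? 0 : (expanded > 255 ? 255 : expanded));
}

int
main ()
{
	/* 4:2:0 chroma has log2_chroma_w == 1, so a left crop of 5 pixels
	   is rounded down to 4. */
	printf ("%d\n", round_for_subsampling(5, 1));

	/* Video black (16) and video white (235) map to 0 and 255. */
	printf ("%d %d\n", video_to_full(16), video_to_full(235));
	return 0;
}

The same rounding is what keeps corrected_crop and the corner offset in crop_scale_window() on chroma-sample boundaries for subsampled output formats.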