X-Git-Url: https://main.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fimage.cc;h=268c08173086659f657530eac0419f2e150a239e;hb=72234d49c16bad30d66256dd6e82d1359f4f830b;hp=06b98c4d3dc8f877c5c0042dd802d466eb34fb87;hpb=fd940c35fc6f6857b93e07a8fb9ac2110dab9475;p=dcpomatic.git

diff --git a/src/lib/image.cc b/src/lib/image.cc
index 06b98c4d3..268c08173 100644
--- a/src/lib/image.cc
+++ b/src/lib/image.cc
@@ -40,8 +40,17 @@ extern "C" {
 #include "exceptions.h"
 #include "scaler.h"
 
+#include "i18n.h"
+
 using namespace std;
 using namespace boost;
+using libdcp::Size;
+
+void
+Image::swap (Image& other)
+{
+	std::swap (_pixel_format, other._pixel_format);
+}
 
 /** @param n Component index.
  *  @return Number of lines in the image for the given component.
@@ -60,9 +69,15 @@ Image::lines (int n) const
 	case PIX_FMT_RGB24:
 	case PIX_FMT_RGBA:
 	case PIX_FMT_YUV422P10LE:
+	case PIX_FMT_YUV422P:
+	case PIX_FMT_YUV444P:
+	case PIX_FMT_YUV444P9BE:
+	case PIX_FMT_YUV444P9LE:
+	case PIX_FMT_YUV444P10BE:
+	case PIX_FMT_YUV444P10LE:
 		return size().height;
 	default:
-		assert (false);
+		throw PixelFormatError (N_("lines()"), _pixel_format);
 	}
 
 	return 0;
@@ -75,23 +90,33 @@ Image::components () const
 	switch (_pixel_format) {
 	case PIX_FMT_YUV420P:
 	case PIX_FMT_YUV422P10LE:
+	case PIX_FMT_YUV422P:
+	case PIX_FMT_YUV444P:
+	case PIX_FMT_YUV444P9BE:
+	case PIX_FMT_YUV444P9LE:
+	case PIX_FMT_YUV444P10BE:
+	case PIX_FMT_YUV444P10LE:
 		return 3;
 	case PIX_FMT_RGB24:
 	case PIX_FMT_RGBA:
 		return 1;
 	default:
-		assert (false);
+		throw PixelFormatError (N_("components()"), _pixel_format);
 	}
 
 	return 0;
 }
 
 shared_ptr<Image>
-Image::scale (Size out_size, Scaler const * scaler) const
+Image::scale (libdcp::Size out_size, Scaler const * scaler, bool result_aligned) const
 {
 	assert (scaler);
+	/* Empirical testing suggests that sws_scale() will crash if
+	   the input image is not aligned.
+	*/
+	assert (aligned ());
 
-	shared_ptr<Image> scaled (new AlignedImage (pixel_format(), out_size));
+	shared_ptr<Image> scaled (new SimpleImage (pixel_format(), out_size, result_aligned));
 
 	struct SwsContext* scale_context = sws_getContext (
 		size().width, size().height, pixel_format(),
@@ -116,14 +141,18 @@ Image::scale (Size out_size, Scaler const * scaler) const
  *  @param scaler Scaler to use.
  */
 shared_ptr<Image>
-Image::scale_and_convert_to_rgb (Size out_size, int padding, Scaler const * scaler) const
+Image::scale_and_convert_to_rgb (libdcp::Size out_size, int padding, Scaler const * scaler, bool result_aligned) const
 {
 	assert (scaler);
+	/* Empirical testing suggests that sws_scale() will crash if
+	   the input image is not aligned.
+	*/
+	assert (aligned ());
 
-	Size content_size = out_size;
+	libdcp::Size content_size = out_size;
 	content_size.width -= (padding * 2);
 
-	shared_ptr<Image> rgb (new AlignedImage (PIX_FMT_RGB24, content_size));
+	shared_ptr<Image> rgb (new SimpleImage (PIX_FMT_RGB24, content_size, result_aligned));
 
 	struct SwsContext* scale_context = sws_getContext (
 		size().width, size().height, pixel_format(),
@@ -144,7 +173,7 @@ Image::scale_and_convert_to_rgb (Size out_size, int padding, Scaler const * scal
 	   scheme of things.
 	*/
 	if (padding > 0) {
-		shared_ptr<Image> padded_rgb (new AlignedImage (PIX_FMT_RGB24, out_size));
+		shared_ptr<Image> padded_rgb (new SimpleImage (PIX_FMT_RGB24, out_size, result_aligned));
 		padded_rgb->make_black ();
 
 		/* XXX: we are cheating a bit here; we know the frame is RGB so we can
@@ -171,9 +200,9 @@ Image::scale_and_convert_to_rgb (Size out_size, int padding, Scaler const * scal
  *  @return Post-processed image.
  */
 shared_ptr<Image>
-Image::post_process (string pp) const
+Image::post_process (string pp, bool aligned) const
 {
-	shared_ptr<Image> out (new AlignedImage (pixel_format(), size ()));
+	shared_ptr<Image> out (new SimpleImage (pixel_format(), size (), aligned));
 
 	int pp_format = 0;
 	switch (pixel_format()) {
@@ -181,10 +210,17 @@ Image::post_process (string pp) const
 		pp_format = PP_FORMAT_420;
 		break;
 	case PIX_FMT_YUV422P10LE:
+	case PIX_FMT_YUV422P:
 		pp_format = PP_FORMAT_422;
 		break;
+	case PIX_FMT_YUV444P:
+	case PIX_FMT_YUV444P9BE:
+	case PIX_FMT_YUV444P9LE:
+	case PIX_FMT_YUV444P10BE:
+	case PIX_FMT_YUV444P10LE:
+		pp_format = PP_FORMAT_444;
 	default:
-		assert (false);
+		throw PixelFormatError (N_("post_process"), pixel_format());
 	}
 
 	pp_mode* mode = pp_get_mode_by_name_and_quality (pp.c_str (), PP_QUALITY_MAX);
@@ -203,17 +239,92 @@ Image::post_process (string pp) const
 	return out;
 }
 
+shared_ptr<Image>
+Image::crop (Crop crop, bool aligned) const
+{
+	libdcp::Size cropped_size = size ();
+	cropped_size.width -= crop.left + crop.right;
+	cropped_size.height -= crop.top + crop.bottom;
+
+	shared_ptr<Image> out (new SimpleImage (pixel_format(), cropped_size, aligned));
+
+	for (int c = 0; c < components(); ++c) {
+		int const crop_left_in_bytes = bytes_per_pixel(c) * crop.left;
+		int const cropped_width_in_bytes = bytes_per_pixel(c) * cropped_size.width;
+
+		/* Start of the source line, cropped from the top but not the left */
+		uint8_t* in_p = data()[c] + crop.top * stride()[c];
+		uint8_t* out_p = out->data()[c];
+
+		for (int y = 0; y < cropped_size.height; ++y) {
+			memcpy (out_p, in_p + crop_left_in_bytes, cropped_width_in_bytes);
+			in_p += stride()[c];
+			out_p += out->stride()[c];
+		}
+	}
+
+	return out;
+}
+
+/** Blacken a YUV image whose bits per pixel is rounded up to 16 */
+void
+Image::yuv_16_black (uint16_t v)
+{
+	memset (data()[0], 0, lines(0) * stride()[0]);
+	for (int i = 1; i < 3; ++i) {
+		int16_t* p = reinterpret_cast<int16_t*> (data()[i]);
+		for (int y = 0; y < size().height; ++y) {
+			for (int x = 0; x < line_size()[i] / 2; ++x) {
+				p[x] = v;
+			}
+			p += stride()[i] / 2;
+		}
+	}
+}
+
+uint16_t
+Image::swap_16 (uint16_t v)
+{
+	return ((v >> 8) & 0xff) | ((v & 0xff) << 8);
+}
+
 void
 Image::make_black ()
 {
+	/* U/V black value for 9-bit colour */
+	static uint16_t const nine_bit_uv = (1 << 8) - 1;
+
+	/* U/V black value for 10-bit colour */
+	static uint16_t const ten_bit_uv = (1 << 9) - 1;
+
 	switch (_pixel_format) {
 	case PIX_FMT_YUV420P:
-	case PIX_FMT_YUV422P10LE:
+	case PIX_FMT_YUV422P:
+	case PIX_FMT_YUV444P:
 		memset (data()[0], 0, lines(0) * stride()[0]);
-		memset (data()[1], 0x80, lines(1) * stride()[1]);
-		memset (data()[2], 0x80, lines(2) * stride()[2]);
+		memset (data()[1], 0x7f, lines(1) * stride()[1]);
+		memset (data()[2], 0x7f, lines(2) * stride()[2]);
+		break;
+
+	case PIX_FMT_YUV422P9LE:
+	case PIX_FMT_YUV444P9LE:
+		yuv_16_black (nine_bit_uv);
 		break;
+	case PIX_FMT_YUV422P9BE:
+	case PIX_FMT_YUV444P9BE:
+		yuv_16_black (swap_16 (nine_bit_uv));
+		break;
+
+	case PIX_FMT_YUV422P10LE:
+	case PIX_FMT_YUV444P10LE:
+		yuv_16_black (ten_bit_uv);
+		break;
+
+	case PIX_FMT_YUV444P10BE:
+	case PIX_FMT_YUV422P10BE:
+		yuv_16_black (swap_16 (ten_bit_uv));
+
 	case PIX_FMT_RGB24:
 		memset (data()[0], 0, lines(0) * stride()[0]);
 		break;
@@ -224,7 +335,7 @@ Image::make_black ()
 }
 
 void
-Image::alpha_blend (shared_ptr<Image> other, Position position)
+Image::alpha_blend (shared_ptr<const Image> other, Position position)
 {
 	/* Only implemented for RGBA onto RGB24 so far */
 	assert (_pixel_format == PIX_FMT_RGB24 && other->pixel_format() == PIX_FMT_RGBA);
@@ -265,7 +376,7 @@ Image::read_from_socket (shared_ptr<Socket> socket)
 	for (int i = 0; i < components(); ++i) {
 		uint8_t* p = data()[i];
 		for (int y = 0; y < lines(i); ++y) {
-			socket->read_definite_and_consume (p, line_size()[i], 30);
+			socket->read (p, line_size()[i]);
 			p += stride()[i];
 		}
 	}
@@ -277,21 +388,77 @@ Image::write_to_socket (shared_ptr<Socket> socket) const
 	for (int i = 0; i < components(); ++i) {
 		uint8_t* p = data()[i];
 		for (int y = 0; y < lines(i); ++y) {
-			socket->write (p, line_size()[i], 30);
+			socket->write (p, line_size()[i]);
 			p += stride()[i];
 		}
 	}
 }
+
+float
+Image::bytes_per_pixel (int c) const
+{
+	if (c == 3) {
+		return 0;
+	}
+
+	switch (_pixel_format) {
+	case PIX_FMT_RGB24:
+		if (c == 0) {
+			return 3;
+		} else {
+			return 0;
+		}
+	case PIX_FMT_RGBA:
+		if (c == 0) {
+			return 4;
+		} else {
+			return 0;
+		}
+	case PIX_FMT_YUV420P:
+	case PIX_FMT_YUV422P:
+		if (c == 0) {
+			return 1;
+		} else {
+			return 0.5;
+		}
+	case PIX_FMT_YUV422P10LE:
+		if (c == 0) {
+			return 2;
+		} else {
+			return 1;
+		}
+	case PIX_FMT_YUV444P:
+		return 3;
+	case PIX_FMT_YUV444P9BE:
+	case PIX_FMT_YUV444P9LE:
+	case PIX_FMT_YUV444P10LE:
+	case PIX_FMT_YUV444P10BE:
+		return 6;
+	default:
+		assert (false);
+	}
+
+	return 0;
+}
+
+
 /** Construct a SimpleImage of a given size and format, allocating memory
  *  as required.
  *
  *  @param p Pixel format.
  *  @param s Size in pixels.
  */
-SimpleImage::SimpleImage (PixelFormat p, Size s, function<int (int)> rounder)
+SimpleImage::SimpleImage (AVPixelFormat p, libdcp::Size s, bool aligned)
 	: Image (p)
 	, _size (s)
+	, _aligned (aligned)
+{
+	allocate ();
+}
+
+void
+SimpleImage::allocate ()
 {
 	_data = (uint8_t **) av_malloc (4 * sizeof (uint8_t *));
 	_data[0] = _data[1] = _data[2] = _data[3] = 0;
@@ -302,29 +469,54 @@ SimpleImage::SimpleImage (PixelFormat p, Size s, function<int (int)> rounder)
 	_stride = (int *) av_malloc (4 * sizeof (int));
 	_stride[0] = _stride[1] = _stride[2] = _stride[3] = 0;
 
-	switch (p) {
-	case PIX_FMT_RGB24:
-		_line_size[0] = s.width * 3;
-		break;
-	case PIX_FMT_RGBA:
-		_line_size[0] = s.width * 4;
-		break;
-	case PIX_FMT_YUV420P:
-	case PIX_FMT_YUV422P10LE:
-		_line_size[0] = s.width;
-		_line_size[1] = s.width / 2;
-		_line_size[2] = s.width / 2;
-		break;
-	default:
-		assert (false);
+	for (int i = 0; i < components(); ++i) {
+		_line_size[i] = _size.width * bytes_per_pixel(i);
+		_stride[i] = stride_round_up (i, _line_size, _aligned ? 32 : 1);
+		_data[i] = (uint8_t *) av_malloc (_stride[i] * lines (i));
 	}
+}
+
+SimpleImage::SimpleImage (SimpleImage const & other)
+	: Image (other)
+{
+	_size = other._size;
+	_aligned = other._aligned;
+
+	allocate ();
 
 	for (int i = 0; i < components(); ++i) {
-		_stride[i] = rounder (_line_size[i]);
-		_data[i] = (uint8_t *) av_malloc (_stride[i] * lines (i));
+		memcpy (_data[i], other._data[i], _line_size[i] * lines(i));
 	}
 }
 
+SimpleImage&
+SimpleImage::operator= (SimpleImage const & other)
+{
+	if (this == &other) {
+		return *this;
+	}
+
+	SimpleImage tmp (other);
+	swap (tmp);
+	return *this;
+}
+
+void
+SimpleImage::swap (SimpleImage & other)
+{
+	Image::swap (other);
+
+	std::swap (_size, other._size);
+
+	for (int i = 0; i < 4; ++i) {
+		std::swap (_data[i], other._data[i]);
+		std::swap (_line_size[i], other._line_size[i]);
+		std::swap (_stride[i], other._stride[i]);
+	}
+
+	std::swap (_aligned, other._aligned);
+}
+
 /** Destroy a SimpleImage */
 SimpleImage::~SimpleImage ()
 {
@@ -355,54 +547,34 @@ SimpleImage::stride () const
 	return _stride;
 }
 
-Size
+libdcp::Size
 SimpleImage::size () const
 {
 	return _size;
 }
 
-AlignedImage::AlignedImage (PixelFormat f, Size s)
-	: SimpleImage (f, s, boost::bind (round_up, _1, 32))
-{
-
-}
-
-CompactImage::CompactImage (PixelFormat f, Size s)
-	: SimpleImage (f, s, boost::bind (round_up, _1, 1))
+bool
+SimpleImage::aligned () const
 {
-
+	return _aligned;
 }
 
-CompactImage::CompactImage (shared_ptr<Image> im)
-	: SimpleImage (im->pixel_format(), im->size(), boost::bind (round_up, _1, 1))
-{
-	assert (components() == im->components());
-
-	for (int c = 0; c < components(); ++c) {
-
-		assert (line_size()[c] == im->line_size()[c]);
-
-		uint8_t* t = data()[c];
-		uint8_t* o = im->data()[c];
-
-		for (int y = 0; y < lines(c); ++y) {
-			memcpy (t, o, line_size()[c]);
-			t += stride()[c];
-			o += im->stride()[c];
-		}
-	}
-}
-
-FilterBufferImage::FilterBufferImage (PixelFormat p, AVFilterBufferRef* b)
+FilterBufferImage::FilterBufferImage (AVPixelFormat p, AVFilterBufferRef* b)
 	: Image (p)
 	, _buffer (b)
 {
-
+	_line_size = (int *) av_malloc (4 * sizeof (int));
+	_line_size[0] = _line_size[1] = _line_size[2] = _line_size[3] = 0;
+
+	for (int i = 0; i < components(); ++i) {
+		_line_size[i] = size().width * bytes_per_pixel(i);
+	}
 }
 
 FilterBufferImage::~FilterBufferImage ()
 {
 	avfilter_unref_buffer (_buffer);
+	av_free (_line_size);
 }
 
 uint8_t **
@@ -414,69 +586,57 @@ FilterBufferImage::data () const
 int *
 FilterBufferImage::line_size () const
 {
-	return _buffer->linesize;
+	return _line_size;
 }
 
 int *
 FilterBufferImage::stride () const
 {
-	/* XXX? */
+	/* I've seen images where the _buffer->linesize is larger than the width
+	   (by a small amount), suggesting that _buffer->linesize is what we call
+	   stride.  But I'm not sure.
+	*/
 	return _buffer->linesize;
 }
 
-Size
+libdcp::Size
 FilterBufferImage::size () const
 {
-	return Size (_buffer->video->w, _buffer->video->h);
-}
-
-/** XXX: this could be generalised to use any format, but I don't
- *  understand how avpicture_fill is supposed to be called with
- *  multi-planar images.
- */
-RGBFrameImage::RGBFrameImage (Size s)
-	: Image (PIX_FMT_RGB24)
-	, _size (s)
-{
-	_frame = avcodec_alloc_frame ();
-	if (_frame == 0) {
-		throw EncodeError ("could not allocate frame");
-	}
-
-	_data = (uint8_t *) av_malloc (size().width * size().height * 3);
-	avpicture_fill ((AVPicture *) _frame, _data, PIX_FMT_RGB24, size().width, size().height);
-	_frame->width = size().width;
-	_frame->height = size().height;
-	_frame->format = PIX_FMT_RGB24;
+	return libdcp::Size (_buffer->video->w, _buffer->video->h);
 }
 
-RGBFrameImage::~RGBFrameImage ()
+bool
+FilterBufferImage::aligned () const
 {
-	av_free (_data);
-	av_free (_frame);
+	/* XXX? */
+	return true;
 }
 
-uint8_t **
-RGBFrameImage::data () const
+RGBPlusAlphaImage::RGBPlusAlphaImage (shared_ptr<const Image> im)
+	: SimpleImage (im->pixel_format(), im->size(), false)
 {
-	return _frame->data;
-}
+	assert (im->pixel_format() == PIX_FMT_RGBA);
+
+	_alpha = (uint8_t *) av_malloc (im->size().width * im->size().height);
+
+	uint8_t* in = im->data()[0];
+	uint8_t* out = data()[0];
+	uint8_t* out_alpha = _alpha;
+	for (int y = 0; y < im->size().height; ++y) {
+		uint8_t* in_r = in;
+		for (int x = 0; x < im->size().width; ++x) {
+			*out++ = *in_r++;
+			*out++ = *in_r++;
+			*out++ = *in_r++;
+			*out_alpha++ = *in_r++;
+		}
 
-int *
-RGBFrameImage::line_size () const
-{
-	return _frame->linesize;
+		in += im->stride()[0];
+	}
 }
 
-int *
-RGBFrameImage::stride () const
+RGBPlusAlphaImage::~RGBPlusAlphaImage ()
 {
-	/* XXX? */
-	return line_size ();
+	av_free (_alpha);
 }
 
-Size
-RGBFrameImage::size () const
-{
-	return _size;
-}
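
The sketch below is not part of the patch; it only illustrates the stride arithmetic that the new aligned/unaligned SimpleImage distinguishes between. In SimpleImage::allocate() each component's line size comes from bytes_per_pixel() and the stride is that line size rounded up to a multiple of 32 bytes when the image is aligned, or left equal to the line size when it is not. The round_to_multiple helper, the 1998-pixel width and the printed output are illustrative assumptions, not code from image.cc.

// Standalone C++ sketch of the aligned vs. unaligned stride choice.
// round_to_multiple is a hypothetical helper, not dcpomatic's stride_round_up.
#include <cstdio>

static int round_to_multiple (int value, int t)
{
	return ((value + t - 1) / t) * t;
}

int main ()
{
	int const width = 1998;                            /* e.g. a flat-ratio frame width */
	float const bytes_per_pixel[3] = { 1, 0.5, 0.5 };  /* YUV420P: luma plus two half-width chroma planes */

	for (int aligned = 0; aligned <= 1; ++aligned) {
		for (int i = 0; i < 3; ++i) {
			int const line_size = int (width * bytes_per_pixel[i]);
			int const stride = round_to_multiple (line_size, aligned ? 32 : 1);
			printf ("aligned=%d component=%d line_size=%d stride=%d\n", aligned, i, line_size, stride);
		}
	}

	return 0;
}

With alignment on, the 1998-byte luma line is padded to a 2016-byte stride and each 999-byte chroma line to 1024 bytes; with alignment off the stride equals the line size, which is what the exact-size path (for example the J2K encoding input) wants.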