X-Git-Url: https://main.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fimage.cc;h=c7dfc91cb374eb9c765b0009e22a00a407f1aff2;hb=3753cb8685e1755b067676345a5871db24149d0f;hp=9162637c7458a1c5f5576dd971f5756953f73f7a;hpb=5c457e804689e69289c26398a09e54e9f6b26e02;p=dcpomatic.git diff --git a/src/lib/image.cc b/src/lib/image.cc index 9162637c7..c7dfc91cb 100644 --- a/src/lib/image.cc +++ b/src/lib/image.cc @@ -18,30 +18,43 @@ */ /** @file src/image.cc - * @brief A set of classes to describe video images. + * @brief A class to describe a video image. */ -#include -#include #include -#include -#include -#include -#include extern "C" { -#include -#include #include -#include -#include #include +#include +#include } #include "image.h" #include "exceptions.h" #include "scaler.h" -using namespace std; -using namespace boost; +#include "i18n.h" + +using std::string; +using std::min; +using std::cout; +using std::cerr; +using boost::shared_ptr; +using libdcp::Size; + +int +Image::line_factor (int n) const +{ + if (n == 0) { + return 1; + } + + AVPixFmtDescriptor const * d = av_pix_fmt_desc_get(_pixel_format); + if (!d) { + throw PixelFormatError ("lines()", _pixel_format); + } + + return pow (2.0f, d->log2_chroma_h); +} /** @param n Component index. * @return Number of lines in the image for the given component. @@ -49,121 +62,109 @@ using namespace boost; int Image::lines (int n) const { - switch (_pixel_format) { - case PIX_FMT_YUV420P: - if (n == 0) { - return size().height; - } else { - return size().height / 2; - } - break; - case PIX_FMT_RGB24: - case PIX_FMT_RGBA: - case PIX_FMT_YUV422P10LE: - return size().height; - default: - assert (false); - } - - return 0; + return rint (ceil (static_cast(size().height) / line_factor (n))); } /** @return Number of components */ int Image::components () const { - switch (_pixel_format) { - case PIX_FMT_YUV420P: - case PIX_FMT_YUV422P10LE: - return 3; - case PIX_FMT_RGB24: - case PIX_FMT_RGBA: - return 1; - default: - assert (false); + AVPixFmtDescriptor const * d = av_pix_fmt_desc_get(_pixel_format); + if (!d) { + throw PixelFormatError ("components()", _pixel_format); } - return 0; + if ((d->flags & PIX_FMT_PLANAR) == 0) { + return 1; + } + + return d->nb_components; } +/** Crop this image, scale it to `inter_size' and then place it in a black frame of `out_size' */ shared_ptr -Image::scale (Size out_size, Scaler const * scaler) const +Image::crop_scale_window (Crop crop, libdcp::Size inter_size, libdcp::Size out_size, Scaler const * scaler, AVPixelFormat out_format, bool out_aligned) const { assert (scaler); + /* Empirical testing suggests that sws_scale() will crash if + the input image is not aligned. 
+ */ + assert (aligned ()); + + assert (out_size.width >= inter_size.width); + assert (out_size.height >= inter_size.height); - shared_ptr scaled (new AlignedImage (pixel_format(), out_size)); + /* Here's an image of out_size */ + shared_ptr out (new Image (out_format, out_size, out_aligned)); + out->make_black (); + /* Size of the image after any crop */ + libdcp::Size const cropped_size = crop.apply (size ()); + + /* Scale context for a scale from cropped_size to inter_size */ struct SwsContext* scale_context = sws_getContext ( - size().width, size().height, pixel_format(), - out_size.width, out_size.height, pixel_format(), + cropped_size.width, cropped_size.height, pixel_format(), + inter_size.width, inter_size.height, out_format, scaler->ffmpeg_id (), 0, 0, 0 ); + if (!scale_context) { + throw StringError (N_("Could not allocate SwsContext")); + } + + /* Prepare input data pointers with crop */ + uint8_t* scale_in_data[components()]; + for (int c = 0; c < components(); ++c) { + scale_in_data[c] = data()[c] + int (rint (bytes_per_pixel(c) * crop.left)) + stride()[c] * (crop.top / line_factor(c)); + } + + /* Corner of the image within out_size */ + Position const corner ((out_size.width - inter_size.width) / 2, (out_size.height - inter_size.height) / 2); + + uint8_t* scale_out_data[out->components()]; + for (int c = 0; c < out->components(); ++c) { + scale_out_data[c] = out->data()[c] + int (rint (out->bytes_per_pixel(c) * corner.x)) + out->stride()[c] * corner.y; + } + sws_scale ( scale_context, - data(), stride(), - 0, size().height, - scaled->data(), scaled->stride() + scale_in_data, stride(), + 0, cropped_size.height, + scale_out_data, out->stride() ); sws_freeContext (scale_context); - return scaled; + return out; } -/** Scale this image to a given size and convert it to RGB. - * @param out_size Output image size in pixels. - * @param scaler Scaler to use. - */ shared_ptr -Image::scale_and_convert_to_rgb (Size out_size, int padding, Scaler const * scaler) const +Image::scale (libdcp::Size out_size, Scaler const * scaler, AVPixelFormat out_format, bool out_aligned) const { assert (scaler); + /* Empirical testing suggests that sws_scale() will crash if + the input image is not aligned. + */ + assert (aligned ()); - Size content_size = out_size; - content_size.width -= (padding * 2); - - shared_ptr rgb (new AlignedImage (PIX_FMT_RGB24, content_size)); + shared_ptr scaled (new Image (out_format, out_size, out_aligned)); struct SwsContext* scale_context = sws_getContext ( size().width, size().height, pixel_format(), - content_size.width, content_size.height, PIX_FMT_RGB24, + out_size.width, out_size.height, out_format, scaler->ffmpeg_id (), 0, 0, 0 ); - /* Scale and convert to RGB from whatever its currently in (which may be RGB) */ sws_scale ( scale_context, data(), stride(), 0, size().height, - rgb->data(), rgb->stride() + scaled->data(), scaled->stride() ); - /* Put the image in the right place in a black frame if are padding; this is - a bit grubby and expensive, but probably inconsequential in the great - scheme of things. - */ - if (padding > 0) { - shared_ptr padded_rgb (new AlignedImage (PIX_FMT_RGB24, out_size)); - padded_rgb->make_black (); - - /* XXX: we are cheating a bit here; we know the frame is RGB so we can - make assumptions about its composition. 
- */ - uint8_t* p = padded_rgb->data()[0] + padding * 3; - uint8_t* q = rgb->data()[0]; - for (int j = 0; j < rgb->lines(0); ++j) { - memcpy (p, q, rgb->line_size()[0]); - p += padded_rgb->stride()[0]; - q += rgb->stride()[0]; - } - - rgb = padded_rgb; - } - sws_freeContext (scale_context); - return rgb; + return scaled; } /** Run a FFmpeg post-process on this image and return the processed version. @@ -171,12 +172,32 @@ Image::scale_and_convert_to_rgb (Size out_size, int padding, Scaler const * scal * @return Post-processed image. */ shared_ptr -Image::post_process (string pp) const +Image::post_process (string pp, bool aligned) const { - shared_ptr out (new AlignedImage (PIX_FMT_YUV420P, size ())); - + shared_ptr out (new Image (pixel_format(), size (), aligned)); + + int pp_format = 0; + switch (pixel_format()) { + case PIX_FMT_YUV420P: + pp_format = PP_FORMAT_420; + break; + case PIX_FMT_YUV422P10LE: + case PIX_FMT_YUV422P: + case PIX_FMT_UYVY422: + pp_format = PP_FORMAT_422; + break; + case PIX_FMT_YUV444P: + case PIX_FMT_YUV444P9BE: + case PIX_FMT_YUV444P9LE: + case PIX_FMT_YUV444P10BE: + case PIX_FMT_YUV444P10LE: + pp_format = PP_FORMAT_444; + default: + throw PixelFormatError ("post_process", pixel_format()); + } + pp_mode* mode = pp_get_mode_by_name_and_quality (pp.c_str (), PP_QUALITY_MAX); - pp_context* context = pp_get_context (size().width, size().height, PP_FORMAT_420 | PP_CPU_CAPS_MMX2); + pp_context* context = pp_get_context (size().width, size().height, pp_format | PP_CPU_CAPS_MMX2); pp_postprocess ( (const uint8_t **) data(), stride(), @@ -191,28 +212,183 @@ Image::post_process (string pp) const return out; } +shared_ptr +Image::crop (Crop crop, bool aligned) const +{ + libdcp::Size cropped_size = crop.apply (size ()); + shared_ptr out (new Image (pixel_format(), cropped_size, aligned)); + + for (int c = 0; c < components(); ++c) { + int const crop_left_in_bytes = bytes_per_pixel(c) * crop.left; + /* bytes_per_pixel() could be a fraction; in this case the stride will be rounded + up, and we need to make sure that we copy over the width (up to the stride) + rather than short of the width; hence the ceil() here. 
+ */ + int const cropped_width_in_bytes = ceil (bytes_per_pixel(c) * cropped_size.width); + + /* Start of the source line, cropped from the top but not the left */ + uint8_t* in_p = data()[c] + (crop.top / out->line_factor(c)) * stride()[c]; + uint8_t* out_p = out->data()[c]; + + for (int y = 0; y < out->lines(c); ++y) { + memcpy (out_p, in_p + crop_left_in_bytes, cropped_width_in_bytes); + in_p += stride()[c]; + out_p += out->stride()[c]; + } + } + + return out; +} + +/** Blacken a YUV image whose bits per pixel is rounded up to 16 */ +void +Image::yuv_16_black (uint16_t v, bool alpha) +{ + memset (data()[0], 0, lines(0) * stride()[0]); + for (int i = 1; i < 3; ++i) { + int16_t* p = reinterpret_cast (data()[i]); + for (int y = 0; y < lines(i); ++y) { + /* We divide by 2 here because we are writing 2 bytes at a time */ + for (int x = 0; x < line_size()[i] / 2; ++x) { + p[x] = v; + } + p += stride()[i] / 2; + } + } + + if (alpha) { + memset (data()[3], 0, lines(3) * stride()[3]); + } +} + +uint16_t +Image::swap_16 (uint16_t v) +{ + return ((v >> 8) & 0xff) | ((v & 0xff) << 8); +} + void Image::make_black () { + /* U/V black value for 8-bit colour */ + static uint8_t const eight_bit_uv = (1 << 7) - 1; + /* U/V black value for 9-bit colour */ + static uint16_t const nine_bit_uv = (1 << 8) - 1; + /* U/V black value for 10-bit colour */ + static uint16_t const ten_bit_uv = (1 << 9) - 1; + /* U/V black value for 16-bit colour */ + static uint16_t const sixteen_bit_uv = (1 << 15) - 1; + switch (_pixel_format) { case PIX_FMT_YUV420P: - case PIX_FMT_YUV422P10LE: + case PIX_FMT_YUV422P: + case PIX_FMT_YUV444P: + case PIX_FMT_YUV411P: memset (data()[0], 0, lines(0) * stride()[0]); - memset (data()[1], 0x80, lines(1) * stride()[1]); - memset (data()[2], 0x80, lines(2) * stride()[2]); + memset (data()[1], eight_bit_uv, lines(1) * stride()[1]); + memset (data()[2], eight_bit_uv, lines(2) * stride()[2]); break; - case PIX_FMT_RGB24: + case PIX_FMT_YUVJ420P: + case PIX_FMT_YUVJ422P: + case PIX_FMT_YUVJ444P: memset (data()[0], 0, lines(0) * stride()[0]); + memset (data()[1], eight_bit_uv + 1, lines(1) * stride()[1]); + memset (data()[2], eight_bit_uv + 1, lines(2) * stride()[2]); break; + case PIX_FMT_YUV422P9LE: + case PIX_FMT_YUV444P9LE: + yuv_16_black (nine_bit_uv, false); + break; + + case PIX_FMT_YUV422P9BE: + case PIX_FMT_YUV444P9BE: + yuv_16_black (swap_16 (nine_bit_uv), false); + break; + + case PIX_FMT_YUV422P10LE: + case PIX_FMT_YUV444P10LE: + yuv_16_black (ten_bit_uv, false); + break; + + case PIX_FMT_YUV422P16LE: + case PIX_FMT_YUV444P16LE: + yuv_16_black (sixteen_bit_uv, false); + break; + + case PIX_FMT_YUV444P10BE: + case PIX_FMT_YUV422P10BE: + yuv_16_black (swap_16 (ten_bit_uv), false); + break; + + case AV_PIX_FMT_YUVA420P9BE: + case AV_PIX_FMT_YUVA422P9BE: + case AV_PIX_FMT_YUVA444P9BE: + yuv_16_black (swap_16 (nine_bit_uv), true); + break; + + case AV_PIX_FMT_YUVA420P9LE: + case AV_PIX_FMT_YUVA422P9LE: + case AV_PIX_FMT_YUVA444P9LE: + yuv_16_black (nine_bit_uv, true); + break; + + case AV_PIX_FMT_YUVA420P10BE: + case AV_PIX_FMT_YUVA422P10BE: + case AV_PIX_FMT_YUVA444P10BE: + yuv_16_black (swap_16 (ten_bit_uv), true); + break; + + case AV_PIX_FMT_YUVA420P10LE: + case AV_PIX_FMT_YUVA422P10LE: + case AV_PIX_FMT_YUVA444P10LE: + yuv_16_black (ten_bit_uv, true); + break; + + case AV_PIX_FMT_YUVA420P16BE: + case AV_PIX_FMT_YUVA422P16BE: + case AV_PIX_FMT_YUVA444P16BE: + yuv_16_black (swap_16 (sixteen_bit_uv), true); + break; + + case AV_PIX_FMT_YUVA420P16LE: + case AV_PIX_FMT_YUVA422P16LE: + 
case AV_PIX_FMT_YUVA444P16LE: + yuv_16_black (sixteen_bit_uv, true); + break; + + case PIX_FMT_RGB24: + case PIX_FMT_ARGB: + case PIX_FMT_RGBA: + case PIX_FMT_ABGR: + case PIX_FMT_BGRA: + memset (data()[0], 0, lines(0) * stride()[0]); + break; + + case PIX_FMT_UYVY422: + { + int const Y = lines(0); + int const X = line_size()[0]; + uint8_t* p = data()[0]; + for (int y = 0; y < Y; ++y) { + for (int x = 0; x < X / 4; ++x) { + *p++ = eight_bit_uv; // Cb + *p++ = 0; // Y0 + *p++ = eight_bit_uv; // Cr + *p++ = 0; // Y1 + } + } + break; + } + default: - assert (false); + throw PixelFormatError ("make_black()", _pixel_format); } } void -Image::alpha_blend (shared_ptr other, Position position) +Image::alpha_blend (shared_ptr other, Position position) { /* Only implemented for RGBA onto RGB24 so far */ assert (_pixel_format == PIX_FMT_RGB24 && other->pixel_format() == PIX_FMT_RGBA); @@ -247,13 +423,28 @@ Image::alpha_blend (shared_ptr other, Position position) } } +void +Image::copy (shared_ptr other, Position position) +{ + /* Only implemented for RGB24 onto RGB24 so far */ + assert (_pixel_format == PIX_FMT_RGB24 && other->pixel_format() == PIX_FMT_RGB24); + assert (position.x >= 0 && position.y >= 0); + + int const N = min (position.x + other->size().width, size().width) - position.x; + for (int ty = position.y, oy = 0; ty < size().height && oy < other->size().height; ++ty, ++oy) { + uint8_t * const tp = data()[0] + ty * stride()[0] + position.x * 3; + uint8_t * const op = other->data()[0] + oy * other->stride()[0]; + memcpy (tp, op, N * 3); + } +} + void Image::read_from_socket (shared_ptr socket) { for (int i = 0; i < components(); ++i) { uint8_t* p = data()[i]; for (int y = 0; y < lines(i); ++y) { - socket->read_definite_and_consume (p, line_size()[i], 30); + socket->read (p, line_size()[i]); p += stride()[i]; } } @@ -265,206 +456,212 @@ Image::write_to_socket (shared_ptr socket) const for (int i = 0; i < components(); ++i) { uint8_t* p = data()[i]; for (int y = 0; y < lines(i); ++y) { - socket->write (p, line_size()[i], 30); + socket->write (p, line_size()[i]); p += stride()[i]; } } } -/** Construct a SimpleImage of a given size and format, allocating memory + +float +Image::bytes_per_pixel (int c) const +{ + AVPixFmtDescriptor const * d = av_pix_fmt_desc_get(_pixel_format); + if (!d) { + throw PixelFormatError ("lines()", _pixel_format); + } + + if (c >= components()) { + return 0; + } + + float bpp[4] = { 0, 0, 0, 0 }; + + bpp[0] = floor ((d->comp[0].depth_minus1 + 1 + 7) / 8); + if (d->nb_components > 1) { + bpp[1] = floor ((d->comp[1].depth_minus1 + 1 + 7) / 8) / pow (2.0f, d->log2_chroma_w); + } + if (d->nb_components > 2) { + bpp[2] = floor ((d->comp[2].depth_minus1 + 1 + 7) / 8) / pow (2.0f, d->log2_chroma_w); + } + if (d->nb_components > 3) { + bpp[3] = floor ((d->comp[3].depth_minus1 + 1 + 7) / 8) / pow (2.0f, d->log2_chroma_w); + } + + if ((d->flags & PIX_FMT_PLANAR) == 0) { + /* Not planar; sum them up */ + return bpp[0] + bpp[1] + bpp[2] + bpp[3]; + } + + return bpp[c]; +} + +/** Construct a Image of a given size and format, allocating memory * as required. * * @param p Pixel format. * @param s Size in pixels. 
*/ -SimpleImage::SimpleImage (PixelFormat p, Size s, function rounder) - : Image (p) - , _size (s) +Image::Image (AVPixelFormat p, libdcp::Size s, bool aligned) + : libdcp::Image (s) + , _pixel_format (p) + , _aligned (aligned) { - _data = (uint8_t **) av_malloc (4 * sizeof (uint8_t *)); + allocate (); +} + +void +Image::allocate () +{ + _data = (uint8_t **) wrapped_av_malloc (4 * sizeof (uint8_t *)); _data[0] = _data[1] = _data[2] = _data[3] = 0; - _line_size = (int *) av_malloc (4 * sizeof (int)); + _line_size = (int *) wrapped_av_malloc (4 * sizeof (int)); _line_size[0] = _line_size[1] = _line_size[2] = _line_size[3] = 0; - _stride = (int *) av_malloc (4 * sizeof (int)); + _stride = (int *) wrapped_av_malloc (4 * sizeof (int)); _stride[0] = _stride[1] = _stride[2] = _stride[3] = 0; - switch (p) { - case PIX_FMT_RGB24: - _line_size[0] = s.width * 3; - break; - case PIX_FMT_RGBA: - _line_size[0] = s.width * 4; - break; - case PIX_FMT_YUV420P: - case PIX_FMT_YUV422P10LE: - _line_size[0] = s.width; - _line_size[1] = s.width / 2; - _line_size[2] = s.width / 2; - break; - default: - assert (false); - } - for (int i = 0; i < components(); ++i) { - _stride[i] = rounder (_line_size[i]); - _data[i] = (uint8_t *) av_malloc (_stride[i] * lines (i)); + _line_size[i] = ceil (_size.width * bytes_per_pixel(i)); + _stride[i] = stride_round_up (i, _line_size, _aligned ? 32 : 1); + + /* The assembler function ff_rgb24ToY_avx (in libswscale/x86/input.asm) + uses a 16-byte fetch to read three bytes (R/G/B) of image data. + Hence on the last pixel of the last line it reads over the end of + the actual data by 1 byte. If the width of an image is a multiple + of the stride alignment there will be no padding at the end of image lines. + OS X crashes on this illegal read, though other operating systems don't + seem to mind. The nasty + 1 in this malloc makes sure there is always a byte + for that instruction to read safely. 
+ */ + _data[i] = (uint8_t *) wrapped_av_malloc (_stride[i] * lines (i) + 1); } } -/** Destroy a SimpleImage */ -SimpleImage::~SimpleImage () +Image::Image (Image const & other) + : libdcp::Image (other) + , _pixel_format (other._pixel_format) + , _aligned (other._aligned) { + allocate (); + for (int i = 0; i < components(); ++i) { - av_free (_data[i]); + uint8_t* p = _data[i]; + uint8_t* q = other._data[i]; + for (int j = 0; j < lines(i); ++j) { + memcpy (p, q, _line_size[i]); + p += stride()[i]; + q += other.stride()[i]; + } } - - av_free (_data); - av_free (_line_size); - av_free (_stride); } -uint8_t ** -SimpleImage::data () const +Image::Image (AVFrame* frame) + : libdcp::Image (libdcp::Size (frame->width, frame->height)) + , _pixel_format (static_cast (frame->format)) + , _aligned (true) { - return _data; -} + allocate (); -int * -SimpleImage::line_size () const -{ - return _line_size; -} - -int * -SimpleImage::stride () const -{ - return _stride; -} - -Size -SimpleImage::size () const -{ - return _size; -} - -AlignedImage::AlignedImage (PixelFormat f, Size s) - : SimpleImage (f, s, boost::bind (round_up, _1, 32)) -{ - -} - -CompactImage::CompactImage (PixelFormat f, Size s) - : SimpleImage (f, s, boost::bind (round_up, _1, 1)) -{ - -} - -CompactImage::CompactImage (shared_ptr im) - : SimpleImage (im->pixel_format(), im->size(), boost::bind (round_up, _1, 1)) -{ - assert (components() == im->components()); - - for (int c = 0; c < components(); ++c) { - - assert (line_size()[c] == im->line_size()[c]); - - uint8_t* t = data()[c]; - uint8_t* o = im->data()[c]; - - for (int y = 0; y < lines(c); ++y) { - memcpy (t, o, line_size()[c]); - t += stride()[c]; - o += im->stride()[c]; + for (int i = 0; i < components(); ++i) { + uint8_t* p = _data[i]; + uint8_t* q = frame->data[i]; + for (int j = 0; j < lines(i); ++j) { + memcpy (p, q, _line_size[i]); + p += stride()[i]; + /* AVFrame's linesize is what we call `stride' */ + q += frame->linesize[i]; } } } -FilterBufferImage::FilterBufferImage (PixelFormat p, AVFilterBufferRef* b) - : Image (p) - , _buffer (b) +Image::Image (shared_ptr other, bool aligned) + : libdcp::Image (other) + , _pixel_format (other->_pixel_format) + , _aligned (aligned) { + allocate (); + for (int i = 0; i < components(); ++i) { + assert(line_size()[i] == other->line_size()[i]); + uint8_t* p = _data[i]; + uint8_t* q = other->data()[i]; + for (int j = 0; j < lines(i); ++j) { + memcpy (p, q, line_size()[i]); + p += stride()[i]; + q += other->stride()[i]; + } + } } -FilterBufferImage::~FilterBufferImage () +Image& +Image::operator= (Image const & other) { - avfilter_unref_buffer (_buffer); -} + if (this == &other) { + return *this; + } -uint8_t ** -FilterBufferImage::data () const -{ - return _buffer->data; + Image tmp (other); + swap (tmp); + return *this; } -int * -FilterBufferImage::line_size () const +void +Image::swap (Image & other) { - return _buffer->linesize; -} + libdcp::Image::swap (other); + + std::swap (_pixel_format, other._pixel_format); -int * -FilterBufferImage::stride () const -{ - /* XXX? 
*/ - return _buffer->linesize; -} + for (int i = 0; i < 4; ++i) { + std::swap (_data[i], other._data[i]); + std::swap (_line_size[i], other._line_size[i]); + std::swap (_stride[i], other._stride[i]); + } -Size -FilterBufferImage::size () const -{ - return Size (_buffer->video->w, _buffer->video->h); + std::swap (_aligned, other._aligned); } -/** XXX: this could be generalised to use any format, but I don't - * understand how avpicture_fill is supposed to be called with - * multi-planar images. - */ -RGBFrameImage::RGBFrameImage (Size s) - : Image (PIX_FMT_RGB24) - , _size (s) +/** Destroy a Image */ +Image::~Image () { - _frame = avcodec_alloc_frame (); - if (_frame == 0) { - throw EncodeError ("could not allocate frame"); + for (int i = 0; i < components(); ++i) { + av_free (_data[i]); } - _data = (uint8_t *) av_malloc (size().width * size().height * 3); - avpicture_fill ((AVPicture *) _frame, _data, PIX_FMT_RGB24, size().width, size().height); - _frame->width = size().width; - _frame->height = size().height; - _frame->format = PIX_FMT_RGB24; -} - -RGBFrameImage::~RGBFrameImage () -{ av_free (_data); - av_free (_frame); + av_free (_line_size); + av_free (_stride); } uint8_t ** -RGBFrameImage::data () const +Image::data () const { - return _frame->data; + return _data; } int * -RGBFrameImage::line_size () const +Image::line_size () const { - return _frame->linesize; + return _line_size; } int * -RGBFrameImage::stride () const +Image::stride () const { - /* XXX? */ - return line_size (); + return _stride; } -Size -RGBFrameImage::size () const +libdcp::Size +Image::size () const { return _size; } + +bool +Image::aligned () const +{ + return _aligned; +} +
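
For orientation, the central addition in this diff is Image::crop_scale_window(), which folds the old crop / scale / pad-with-black sequence into a single sws_scale() pass into a pre-blackened frame. Below is a minimal caller-side sketch, assuming a decoded source Image and a Scaler const * are already in hand; the Crop initialisation is illustrative, since only crop.left, crop.top and crop.apply() are visible in this listing, and the shared_ptr template arguments are assumptions (the angle-bracketed types have been stripped from the diff text).

#include "image.h"
#include "scaler.h"
using boost::shared_ptr;

shared_ptr<Image>
make_padded_frame (shared_ptr<const Image> source, Scaler const * scaler)
{
	/* Trim 16 pixels from the left of the source; Crop is assumed to be an
	   aggregate with left/right/top/bottom members, zeroed by default.
	*/
	Crop crop;
	crop.left = 16;

	/* Scale the cropped picture to 1998x1080 and centre it in a black
	   2048x1080 RGB frame.  The source must itself be aligned, because its
	   planes are handed straight to sws_scale().
	*/
	return source->crop_scale_window (
		crop,
		libdcp::Size (1998, 1080),	// inter_size: the picture itself
		libdcp::Size (2048, 1080),	// out_size: the surrounding black frame
		scaler,
		PIX_FMT_RGB24,			// out_format
		true				// out_aligned
		);
}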
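
The scaling paths above assert aligned() because, as the comments note, sws_scale() has been seen to crash on unaligned input, so an image that arrives with compact (unaligned) strides needs to be copied before it can be scaled. A small sketch of that idiom using the new copy constructor that takes an aligned flag; the shared_ptr template argument is again an assumption:

#include "image.h"
using boost::shared_ptr;

shared_ptr<Image>
ensure_aligned (shared_ptr<Image> image)
{
	if (image->aligned ()) {
		return image;
	}

	/* Deep copy into a fresh Image whose strides are rounded up to 32 bytes */
	return shared_ptr<Image> (new Image (image, true));
}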
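
As a worked example of the allocation arithmetic in allocate() and bytes_per_pixel(): for a 1998-pixel-wide PIX_FMT_YUV422P10LE image the pixel format descriptor gives depth_minus1 = 9 and log2_chroma_w = 1, so bytes_per_pixel() returns 2 for the luma plane and 2 / 2 = 1 for each chroma plane. line_size therefore works out as {3996, 1998, 1998}, and with the 32-byte rounding used for aligned images the strides become {4000, 2016, 2016}, each plane's buffer getting one extra guard byte for the over-read described in the comment above (this assumes stride_round_up() rounds each line size up to the given multiple, as its use here suggests). Since log2_chroma_h is 0 for 4:2:2, line_factor() is 1 for every plane and lines() is simply the image height.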