Fix incorrect images when cropping without stretch.
[dcpomatic.git] / src / lib / image.cc
index 03a4d96b4848da8a133828c144195b76033d396c..1439d9f7f964228385ba20e28c0bbb8ba85082c5 100644
@@ -27,6 +27,7 @@
 #include "timer.h"
 #include "rect.h"
 #include "util.h"
+#include "compose.hpp"
 #include "dcpomatic_socket.h"
 #include <dcp/rgb_xyz.h>
 #include <dcp/transfer_function.h>
@@ -36,6 +37,10 @@ extern "C" {
 #include <libavutil/pixdesc.h>
 #include <libavutil/frame.h>
 }
+#include <png.h>
+#if HAVE_VALGRIND_MEMCHECK_H
+#include <valgrind/memcheck.h>
+#endif
 #include <iostream>
 
 #include "i18n.h"
@@ -68,15 +73,16 @@ Image::vertical_factor (int n) const
 int
 Image::horizontal_factor (int n) const
 {
-       int horizontal_factor = 1;
-       if (n > 0) {
-               AVPixFmtDescriptor const * d = av_pix_fmt_desc_get (_pixel_format);
-               if (!d) {
-                       throw PixelFormatError ("sample_size()", _pixel_format);
-               }
-               horizontal_factor = pow (2.0f, d->log2_chroma_w);
+       if (n == 0) {
+               return 1;
+       }
+
+       AVPixFmtDescriptor const * d = av_pix_fmt_desc_get(_pixel_format);
+       if (!d) {
+               throw PixelFormatError ("sample_size()", _pixel_format);
        }
-       return horizontal_factor;
+
+       return pow (2.0f, d->log2_chroma_w);
 }
 
 /** @param n Component index.
@@ -100,6 +106,10 @@ Image::planes () const
                throw PixelFormatError ("planes()", _pixel_format);
        }
 
+       if (_pixel_format == AV_PIX_FMT_PAL8) {
+               return 2;
+       }
+
        if ((d->flags & AV_PIX_FMT_FLAG_PLANAR) == 0) {
                return 1;
        }
@@ -130,29 +140,7 @@ Image::crop_scale_window (
        DCPOMATIC_ASSERT (out_size.width >= inter_size.width);
        DCPOMATIC_ASSERT (out_size.height >= inter_size.height);
 
-       /* Here's an image of out_size.  Below we may write to it starting at an offset so we get some padding.
-          Hence we want to write in the following pattern:
-
-          block start   write start                                  line end
-          |..(padding)..|<------line-size------------->|..(padding)..|
-          |..(padding)..|<------line-size------------->|..(padding)..|
-          |..(padding)..|<------line-size------------->|..(padding)..|
-
-          where line-size is of the smaller (inter_size) image and the full padded line length is that of
-          out_size.  To get things to work we have to tell FFmpeg that the stride is that of out_size.
-          However some parts of FFmpeg (notably rgb48Toxyz12 in swscale.c) process data for the full
-          specified *stride*.  This does not matter until we get to the last line:
-
-          block start   write start                                  line end
-          |..(padding)..|<------line-size------------->|XXXwrittenXXX|
-          |XXXwrittenXXX|<------line-size------------->|XXXwrittenXXX|
-          |XXXwrittenXXX|<------line-size------------->|XXXwrittenXXXXXXwrittenXXX
-                                                                      ^^^^ out of bounds
-
-          To get around this, we ask Image to overallocate its buffers by the overrun.
-       */
-
-       shared_ptr<Image> out (new Image (out_format, out_size, out_aligned, (out_size.width - inter_size.width) / 2));
+       shared_ptr<Image> out (new Image(out_format, out_size, out_aligned));
        out->make_black ();
 
        /* Size of the image after any crop */
@@ -175,6 +163,18 @@ Image::crop_scale_window (
                SWS_CS_ITU709
        };
 
+       /* The 3rd parameter here is:
+          0 -> source range MPEG (i.e. "video", 16-235)
+          1 -> source range JPEG (i.e. "full", 0-255)
+          And the 5th:
+          0 -> destination range MPEG (i.e. "video", 16-235)
+          1 -> destination range JPEG (i.e. "full", 0-255)
+
+          But remember: sws_setColorspaceDetails ignores
+          these parameters unless the image is YUV or gray
+          (libswscale's isYUV()/isGray()); if it is neither,
+          video range is used for source and destination.
+       */
        sws_setColorspaceDetails (
                scale_context,
                sws_getCoefficients (lut[yuv_to_rgb]), 0,
@@ -182,8 +182,8 @@ Image::crop_scale_window (
                0, 1 << 16, 1 << 16
                );
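For illustration only (not part of the patch): the 3rd and 5th arguments described in the comment above are libswscale's srcRange and dstRange flags. A minimal sketch of a full-range ("JPEG") setup, using the same brightness/contrast/saturation defaults as the call above; the helper name is hypothetical.

	extern "C" {
	#include <libswscale/swscale.h>
	}

	/* Hypothetical helper: mark both source and destination as full range (0-255).
	   libswscale still ignores these flags for formats that are neither YUV nor gray.
	*/
	static void
	set_full_range (SwsContext* context)
	{
		sws_setColorspaceDetails (
			context,
			sws_getCoefficients (SWS_CS_ITU709), 1,  /* 3rd argument: source range JPEG/full */
			sws_getCoefficients (SWS_CS_ITU709), 1,  /* 5th argument: destination range JPEG/full */
			0, 1 << 16, 1 << 16
			);
	}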
 
-       AVPixFmtDescriptor const * desc = av_pix_fmt_desc_get (_pixel_format);
-       if (!desc) {
+       AVPixFmtDescriptor const * in_desc = av_pix_fmt_desc_get (_pixel_format);
+       if (!in_desc) {
                throw PixelFormatError ("crop_scale_window()", _pixel_format);
        }
 
@@ -195,16 +195,23 @@ Image::crop_scale_window (
                   round down so that we don't crop a subsampled pixel until
                   we've cropped all of its Y-channel pixels.
                */
-               int const x = lrintf (bytes_per_pixel(c) * crop.left) & ~ ((int) desc->log2_chroma_w);
+               int const x = lrintf (bytes_per_pixel(c) * crop.left) & ~ ((int) in_desc->log2_chroma_w);
                scale_in_data[c] = data()[c] + x + stride()[c] * (crop.top / vertical_factor(c));
        }
 
        /* Corner of the image within out_size */
        Position<int> const corner ((out_size.width - inter_size.width) / 2, (out_size.height - inter_size.height) / 2);
 
+       AVPixFmtDescriptor const * out_desc = av_pix_fmt_desc_get (out_format);
+       if (!out_desc) {
+               throw PixelFormatError ("crop_scale_window()", out_format);
+       }
+
        uint8_t* scale_out_data[out->planes()];
        for (int c = 0; c < out->planes(); ++c) {
-               scale_out_data[c] = out->data()[c] + lrintf (out->bytes_per_pixel(c) * corner.x) + out->stride()[c] * corner.y;
+               /* See the note in the crop loop above */
+               int const x = lrintf (out->bytes_per_pixel(c) * corner.x) & ~ ((int) out_desc->log2_chroma_w);
+               scale_out_data[c] = out->data()[c] + x + out->stride()[c] * (corner.y / out->vertical_factor(c));
        }
 
        sws_scale (
@@ -216,9 +223,23 @@ Image::crop_scale_window (
 
        sws_freeContext (scale_context);
 
+       if (crop != Crop() && cropped_size == inter_size && _pixel_format == out_format) {
+               /* We are cropping without any scaling or pixel format conversion, so FFmpeg may have left some
+                  data behind in our image.  Clear it out.  It may get to the point where we should just stop
+                  trying to be clever with cropping.
+               */
+               out->make_part_black (corner.x + cropped_size.width, out_size.width - cropped_size.width);
+       }
+
        return out;
 }
 
+shared_ptr<Image>
+Image::convert_pixel_format (dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_format, bool out_aligned, bool fast) const
+{
+       return scale(size(), yuv_to_rgb, out_format, out_aligned, fast);
+}
+
 /** @param out_size Size to scale to.
 *  @param yuv_to_rgb YUVToRGB transform to use, if required.
  *  @param out_format Output pixel format.
@@ -239,7 +260,7 @@ Image::scale (dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_fo
        struct SwsContext* scale_context = sws_getContext (
                size().width, size().height, pixel_format(),
                out_size.width, out_size.height, out_format,
-               fast ? SWS_FAST_BILINEAR : SWS_BICUBIC, 0, 0, 0
+               (fast ? SWS_FAST_BILINEAR : SWS_BICUBIC) | SWS_ACCURATE_RND, 0, 0, 0
                );
 
        DCPOMATIC_ASSERT (yuv_to_rgb < dcp::YUV_TO_RGB_COUNT);
@@ -248,6 +269,18 @@ Image::scale (dcp::Size out_size, dcp::YUVToRGB yuv_to_rgb, AVPixelFormat out_fo
                SWS_CS_ITU709
        };
 
+       /* The 3rd parameter here is:
+          0 -> source range MPEG (i.e. "video", 16-235)
+          1 -> source range JPEG (i.e. "full", 0-255)
+          And the 5th:
+          0 -> destination range MPEG (i.e. "video", 16-235)
+          1 -> destination range JPEG (i.e. "full", 0-255)
+
+          But remember: sws_setColorspaceDetails ignores
+          these parameters unless the image is YUV or gray
+          (libswscale's isYUV()/isGray()); if it is neither,
+          video range is used for source and destination.
+       */
        sws_setColorspaceDetails (
                scale_context,
                sws_getCoefficients (lut[yuv_to_rgb]), 0,
@@ -295,6 +328,36 @@ Image::swap_16 (uint16_t v)
        return ((v >> 8) & 0xff) | ((v & 0xff) << 8);
 }
 
+void
+Image::make_part_black (int x, int w)
+{
+       switch (_pixel_format) {
+       case AV_PIX_FMT_RGB24:
+       case AV_PIX_FMT_ARGB:
+       case AV_PIX_FMT_RGBA:
+       case AV_PIX_FMT_ABGR:
+       case AV_PIX_FMT_BGRA:
+       case AV_PIX_FMT_RGB555LE:
+       case AV_PIX_FMT_RGB48LE:
+       case AV_PIX_FMT_RGB48BE:
+       case AV_PIX_FMT_XYZ12LE:
+       {
+               int const h = sample_size(0).height;
+               int const bpp = bytes_per_pixel(0);
+               int const s = stride()[0];
+               uint8_t* p = data()[0];
+               for (int y = 0; y < h; y++) {
+                       memset (p + x * bpp, 0, w * bpp);
+                       p += s;
+               }
+               break;
+       }
+
+       default:
+               throw PixelFormatError ("make_part_black()", _pixel_format);
+       }
+}
+
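A worked example of the new helper (illustrative, not in this commit): for AV_PIX_FMT_RGB24 the first plane has 3 bytes per pixel, so make_part_black(x, w) zeroes w * 3 bytes starting at byte offset x * 3 of every row, stepping by stride()[0] between rows. The image name below is hypothetical.

	/* Blank a 78-pixel band starting at column 1920 of an RGB24 frame, e.g. the
	   right-hand padding of a 1998-pixel-wide output holding a 1920-pixel picture.
	   Each row effectively gets memset (row + 1920 * 3, 0, 78 * 3).
	*/
	frame->make_part_black (1920, 78);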
 void
 Image::make_black ()
 {
@@ -422,48 +485,21 @@ Image::make_black ()
 void
 Image::make_transparent ()
 {
-       if (_pixel_format != AV_PIX_FMT_RGBA) {
+       if (_pixel_format != AV_PIX_FMT_BGRA && _pixel_format != AV_PIX_FMT_RGBA) {
                throw PixelFormatError ("make_transparent()", _pixel_format);
        }
 
        memset (data()[0], 0, sample_size(0).height * stride()[0]);
 }
 
-template <class T>
-void
-component (
-       int n,
-       Image* base,
-       shared_ptr<const Image> other,
-       shared_ptr<const Image> rgba,
-       int start_base_x, int start_base_y,
-       int start_other_x, int start_other_y
-       )
-{
-       dcp::Size const base_size = base->sample_size(n);
-       dcp::Size const other_size = other->sample_size(n);
-       for (int by = start_base_y, oy = start_other_y; by < base_size.height && oy < other_size.height; ++by, ++oy) {
-               /* base image */
-               T* bp = ((T*) (base->data()[n] + by * base->stride()[n])) + start_base_x;
-               /* overlay image */
-               T* op = ((T*) (other->data()[n] + oy * other->stride()[n]));
-               /* original RGBA for alpha channel */
-               uint8_t* rp = rgba->data()[0] + oy * rgba->stride()[0];
-               for (int bx = start_base_x, ox = start_other_x; bx < base_size.width && ox < other_size.width; ++bx, ++ox) {
-                       float const alpha = float (rp[3]) / 255;
-                       *bp = *op * alpha + *bp * (1 - alpha);
-                       ++bp;
-                       ++op;
-                       rp += 4;
-               }
-       }
-}
-
 void
 Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
 {
-       /* We're blending RGBA images; first byte is blue, second byte is green, third byte blue, fourth byte alpha */
-       DCPOMATIC_ASSERT (other->pixel_format() == AV_PIX_FMT_RGBA);
+       /* We're blending RGBA or BGRA images */
+       DCPOMATIC_ASSERT (other->pixel_format() == AV_PIX_FMT_BGRA || other->pixel_format() == AV_PIX_FMT_RGBA);
+       int const blue = other->pixel_format() == AV_PIX_FMT_BGRA ? 0 : 2;
+       int const red = other->pixel_format() == AV_PIX_FMT_BGRA ? 2 : 0;
+
        int const other_bpp = 4;
 
        int start_tx = position.x;
@@ -492,9 +528,9 @@ Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
                        uint8_t* op = other->data()[0] + oy * other->stride()[0];
                        for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) {
                                float const alpha = float (op[3]) / 255;
-                               tp[0] = op[2] * alpha + tp[0] * (1 - alpha);
+                               tp[0] = op[red] * alpha + tp[0] * (1 - alpha);
                                tp[1] = op[1] * alpha + tp[1] * (1 - alpha);
-                               tp[2] = op[0] * alpha + tp[2] * (1 - alpha);
+                               tp[2] = op[blue] * alpha + tp[2] * (1 - alpha);
 
                                tp += this_bpp;
                                op += other_bpp;
@@ -503,6 +539,24 @@ Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
                break;
        }
        case AV_PIX_FMT_BGRA:
+       {
+               int const this_bpp = 4;
+               for (int ty = start_ty, oy = start_oy; ty < size().height && oy < other->size().height; ++ty, ++oy) {
+                       uint8_t* tp = data()[0] + ty * stride()[0] + start_tx * this_bpp;
+                       uint8_t* op = other->data()[0] + oy * other->stride()[0];
+                       for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) {
+                               float const alpha = float (op[3]) / 255;
+                               tp[0] = op[blue] * alpha + tp[0] * (1 - alpha);
+                               tp[1] = op[1] * alpha + tp[1] * (1 - alpha);
+                               tp[2] = op[red] * alpha + tp[2] * (1 - alpha);
+                               tp[3] = op[3] * alpha + tp[3] * (1 - alpha);
+
+                               tp += this_bpp;
+                               op += other_bpp;
+                       }
+               }
+               break;
+       }
        case AV_PIX_FMT_RGBA:
        {
                int const this_bpp = 4;
@@ -511,9 +565,9 @@ Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
                        uint8_t* op = other->data()[0] + oy * other->stride()[0];
                        for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) {
                                float const alpha = float (op[3]) / 255;
-                               tp[0] = op[0] * alpha + tp[0] * (1 - alpha);
+                               tp[0] = op[red] * alpha + tp[0] * (1 - alpha);
                                tp[1] = op[1] * alpha + tp[1] * (1 - alpha);
-                               tp[2] = op[2] * alpha + tp[2] * (1 - alpha);
+                               tp[2] = op[blue] * alpha + tp[2] * (1 - alpha);
                                tp[3] = op[3] * alpha + tp[3] * (1 - alpha);
 
                                tp += this_bpp;
@@ -530,10 +584,10 @@ Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
                        uint8_t* op = other->data()[0] + oy * other->stride()[0];
                        for (int tx = start_tx, ox = start_ox; tx < size().width && ox < other->size().width; ++tx, ++ox) {
                                float const alpha = float (op[3]) / 255;
-                               /* Blend high bytes; the RGBA in op appears to be BGRA */
-                               tp[1] = op[2] * alpha + tp[1] * (1 - alpha);
+                               /* Blend high bytes */
+                               tp[1] = op[red] * alpha + tp[1] * (1 - alpha);
                                tp[3] = op[1] * alpha + tp[3] * (1 - alpha);
-                               tp[5] = op[0] * alpha + tp[5] * (1 - alpha);
+                               tp[5] = op[blue] * alpha + tp[5] * (1 - alpha);
 
                                tp += this_bpp;
                                op += other_bpp;
@@ -556,9 +610,9 @@ Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
                                float const alpha = float (op[3]) / 255;
 
                                /* Convert sRGB to XYZ; op is BGRA or RGBA.  First, input gamma LUT */
-                               double const r = lut_in[op[2]];
+                               double const r = lut_in[op[red]];
                                double const g = lut_in[op[1]];
-                               double const b = lut_in[op[0]];
+                               double const b = lut_in[op[blue]];
 
                                /* RGB to XYZ, including Bradford transform and DCI companding */
                                double const x = max (0.0, min (65535.0, r * fast_matrix[0] + g * fast_matrix[1] + b * fast_matrix[2]));
@@ -578,18 +632,105 @@ Image::alpha_blend (shared_ptr<const Image> other, Position<int> position)
        }
        case AV_PIX_FMT_YUV420P:
        {
-               shared_ptr<Image> yuv = other->scale (other->size(), dcp::YUV_TO_RGB_REC709, _pixel_format, false, false);
-               component<uint8_t> (0, this, yuv, other, start_tx, start_ty, start_ox, start_oy);
-               component<uint8_t> (1, this, yuv, other, start_tx, start_ty, start_ox, start_oy);
-               component<uint8_t> (2, this, yuv, other, start_tx, start_ty, start_ox, start_oy);
+               shared_ptr<Image> yuv = other->convert_pixel_format (dcp::YUV_TO_RGB_REC709, _pixel_format, false, false);
+               dcp::Size const ts = size();
+               dcp::Size const os = yuv->size();
+               for (int ty = start_ty, oy = start_oy; ty < ts.height && oy < os.height; ++ty, ++oy) {
+                       int const hty = ty / 2;
+                       int const hoy = oy / 2;
+                       uint8_t* tY = data()[0] + (ty * stride()[0]) + start_tx;
+                       uint8_t* tU = data()[1] + (hty * stride()[1]) + start_tx / 2;
+                       uint8_t* tV = data()[2] + (hty * stride()[2]) + start_tx / 2;
+                       uint8_t* oY = yuv->data()[0] + (oy * yuv->stride()[0]) + start_ox;
+                       uint8_t* oU = yuv->data()[1] + (hoy * yuv->stride()[1]) + start_ox / 2;
+                       uint8_t* oV = yuv->data()[2] + (hoy * yuv->stride()[2]) + start_ox / 2;
+                       uint8_t* alpha = other->data()[0] + (oy * other->stride()[0]) + start_ox * 4;
+                       for (int tx = start_tx, ox = start_ox; tx < ts.width && ox < os.width; ++tx, ++ox) {
+                               float const a = float(alpha[3]) / 255;
+                               *tY = *oY * a + *tY * (1 - a);
+                               *tU = *oU * a + *tU * (1 - a);
+                               *tV = *oV * a + *tV * (1 - a);
+                               ++tY;
+                               ++oY;
+                               if (tx % 2) {
+                                       ++tU;
+                                       ++tV;
+                               }
+                               if (ox % 2) {
+                                       ++oU;
+                                       ++oV;
+                               }
+                               alpha += 4;
+                       }
+               }
                break;
        }
        case AV_PIX_FMT_YUV420P10:
        {
-               shared_ptr<Image> yuv = other->scale (other->size(), dcp::YUV_TO_RGB_REC709, _pixel_format, false, false);
-               component<uint16_t> (0, this, yuv, other, start_tx, start_ty, start_ox, start_oy);
-               component<uint8_t>  (1, this, yuv, other, start_tx, start_ty, start_ox, start_oy);
-               component<uint8_t>  (2, this, yuv, other, start_tx, start_ty, start_ox, start_oy);
+               shared_ptr<Image> yuv = other->convert_pixel_format (dcp::YUV_TO_RGB_REC709, _pixel_format, false, false);
+               dcp::Size const ts = size();
+               dcp::Size const os = yuv->size();
+               for (int ty = start_ty, oy = start_oy; ty < ts.height && oy < os.height; ++ty, ++oy) {
+                       int const hty = ty / 2;
+                       int const hoy = oy / 2;
+                       uint16_t* tY = ((uint16_t *) (data()[0] + (ty * stride()[0]))) + start_tx;
+                       uint16_t* tU = ((uint16_t *) (data()[1] + (hty * stride()[1]))) + start_tx / 2;
+                       uint16_t* tV = ((uint16_t *) (data()[2] + (hty * stride()[2]))) + start_tx / 2;
+                       uint16_t* oY = ((uint16_t *) (yuv->data()[0] + (oy * yuv->stride()[0]))) + start_ox;
+                       uint16_t* oU = ((uint16_t *) (yuv->data()[1] + (hoy * yuv->stride()[1]))) + start_ox / 2;
+                       uint16_t* oV = ((uint16_t *) (yuv->data()[2] + (hoy * yuv->stride()[2]))) + start_ox / 2;
+                       uint8_t* alpha = other->data()[0] + (oy * other->stride()[0]) + start_ox * 4;
+                       for (int tx = start_tx, ox = start_ox; tx < ts.width && ox < os.width; ++tx, ++ox) {
+                               float const a = float(alpha[3]) / 255;
+                               *tY = *oY * a + *tY * (1 - a);
+                               *tU = *oU * a + *tU * (1 - a);
+                               *tV = *oV * a + *tV * (1 - a);
+                               ++tY;
+                               ++oY;
+                               if (tx % 2) {
+                                       ++tU;
+                                       ++tV;
+                               }
+                               if (ox % 2) {
+                                       ++oU;
+                                       ++oV;
+                               }
+                               alpha += 4;
+                       }
+               }
+               break;
+       }
+       case AV_PIX_FMT_YUV422P10LE:
+       {
+               shared_ptr<Image> yuv = other->convert_pixel_format (dcp::YUV_TO_RGB_REC709, _pixel_format, false, false);
+               dcp::Size const ts = size();
+               dcp::Size const os = yuv->size();
+               for (int ty = start_ty, oy = start_oy; ty < ts.height && oy < os.height; ++ty, ++oy) {
+                       uint16_t* tY = ((uint16_t *) (data()[0] + (ty * stride()[0]))) + start_tx;
+                       uint16_t* tU = ((uint16_t *) (data()[1] + (ty * stride()[1]))) + start_tx / 2;
+                       uint16_t* tV = ((uint16_t *) (data()[2] + (ty * stride()[2]))) + start_tx / 2;
+                       uint16_t* oY = ((uint16_t *) (yuv->data()[0] + (oy * yuv->stride()[0]))) + start_ox;
+                       uint16_t* oU = ((uint16_t *) (yuv->data()[1] + (oy * yuv->stride()[1]))) + start_ox / 2;
+                       uint16_t* oV = ((uint16_t *) (yuv->data()[2] + (oy * yuv->stride()[2]))) + start_ox / 2;
+                       uint8_t* alpha = other->data()[0] + (oy * other->stride()[0]) + start_ox * 4;
+                       for (int tx = start_tx, ox = start_ox; tx < ts.width && ox < os.width; ++tx, ++ox) {
+                               float const a = float(alpha[3]) / 255;
+                               *tY = *oY * a + *tY * (1 - a);
+                               *tU = *oU * a + *tU * (1 - a);
+                               *tV = *oV * a + *tV * (1 - a);
+                               ++tY;
+                               ++oY;
+                               if (tx % 2) {
+                                       ++tU;
+                                       ++tV;
+                               }
+                               if (ox % 2) {
+                                       ++oU;
+                                       ++oV;
+                               }
+                               alpha += 4;
+                       }
+               }
                break;
        }
        default:
@@ -690,13 +831,11 @@ Image::bytes_per_pixel (int c) const
  *  @param p Pixel format.
  *  @param s Size in pixels.
  *  @param aligned true to make each row of this image aligned to a 32-byte boundary.
- *  @param extra_pixels Amount of extra "run-off" memory to allocate at the end of each plane in pixels.
  */
-Image::Image (AVPixelFormat p, dcp::Size s, bool aligned, int extra_pixels)
+Image::Image (AVPixelFormat p, dcp::Size s, bool aligned)
        : _size (s)
        , _pixel_format (p)
        , _aligned (aligned)
-       , _extra_pixels (extra_pixels)
 {
        allocate ();
 }
@@ -730,16 +869,45 @@ Image::allocate ()
                   over-reads by more than _avx.  I can't follow the code to work out how much,
                   so I'll just over-allocate by 32 bytes and have done with it.  Empirical
                   testing suggests that it works.
+
+                  In addition to these concerns, we may read/write as much as a whole extra line
+                  at the end of each plane in cases where we are messing with offsets in order to
+                  pad or crop.  To solve this we over-allocate by an extra _stride[i] bytes.
+
+                  As an example: we may write to images starting at an offset so we get some padding.
+                  Hence we want to write in the following pattern:
+
+                  block start   write start                                  line end
+                  |..(padding)..|<------line-size------------->|..(padding)..|
+                  |..(padding)..|<------line-size------------->|..(padding)..|
+                  |..(padding)..|<------line-size------------->|..(padding)..|
+
+                  where line-size is that of the smaller (inter_size) image and the full padded line length is that of
+                  out_size.  To get things to work we have to tell FFmpeg that the stride is that of out_size.
+                  However some parts of FFmpeg (notably rgb48Toxyz12 in swscale.c) process data for the full
+                  specified *stride*.  This does not matter until we get to the last line:
+
+                  block start   write start                                  line end
+                  |..(padding)..|<------line-size------------->|XXXwrittenXXX|
+                  |XXXwrittenXXX|<------line-size------------->|XXXwrittenXXX|
+                  |XXXwrittenXXX|<------line-size------------->|XXXwrittenXXXXXXwrittenXXX
+                                                                              ^^^^ out of bounds
+               */
+               _data[i] = (uint8_t *) wrapped_av_malloc (_stride[i] * (sample_size(i).height + 1) + 32);
+#if HAVE_VALGRIND_MEMCHECK_H
+               /* The data between the end of the line size and the stride is undefined but processed by
+                  libswscale, causing lots of valgrind errors.  Mark it all defined to quell these errors.
                */
-               _data[i] = (uint8_t *) wrapped_av_malloc (_stride[i] * sample_size(i).height + _extra_pixels * bytes_per_pixel(i) + 32);
+               VALGRIND_MAKE_MEM_DEFINED (_data[i], _stride[i] * (sample_size(i).height + 1) + 32);
+#endif
        }
 }
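A worked example of the over-allocation (illustrative numbers, not part of the patch), assuming the aligned stride rounds each line size up to a multiple of 32 bytes:

	/* An aligned RGB24 plane, 1998 pixels wide and 1080 high:
	     line size  = 1998 * 3                = 5994 bytes
	     stride     = 5994 rounded up to 32   = 6016 bytes
	     allocation = 6016 * (1080 + 1) + 32  = 6503328 bytes
	   i.e. one extra line to absorb the offset writes described above,
	   plus 32 bytes for libswscale's SIMD over-reads.
	*/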
 
 Image::Image (Image const & other)
-       : _size (other._size)
+       : boost::enable_shared_from_this<Image>(other)
+       , _size (other._size)
        , _pixel_format (other._pixel_format)
        , _aligned (other._aligned)
-       , _extra_pixels (other._extra_pixels)
 {
        allocate ();
 
@@ -759,7 +927,6 @@ Image::Image (AVFrame* frame)
        : _size (frame->width, frame->height)
        , _pixel_format (static_cast<AVPixelFormat> (frame->format))
        , _aligned (true)
-       , _extra_pixels (0)
 {
        allocate ();
 
@@ -780,7 +947,6 @@ Image::Image (shared_ptr<const Image> other, bool aligned)
        : _size (other->_size)
        , _pixel_format (other->_pixel_format)
        , _aligned (aligned)
-       , _extra_pixels (other->_extra_pixels)
 {
        allocate ();
 
@@ -822,7 +988,6 @@ Image::swap (Image & other)
        }
 
        std::swap (_aligned, other._aligned);
-       std::swap (_extra_pixels, other._extra_pixels);
 }
 
 /** Destroy an Image */
@@ -926,49 +1091,61 @@ operator== (Image const & a, Image const & b)
 void
 Image::fade (float f)
 {
+       /* U/V black value for 8-bit colour */
+       static int const eight_bit_uv =    (1 << 7) - 1;
+       /* U/V black value for 10-bit colour */
+       static uint16_t const ten_bit_uv = (1 << 9) - 1;
+
        switch (_pixel_format) {
        case AV_PIX_FMT_YUV420P:
-       case AV_PIX_FMT_YUV422P:
-       case AV_PIX_FMT_YUV444P:
-       case AV_PIX_FMT_YUV411P:
-       case AV_PIX_FMT_YUVJ420P:
-       case AV_PIX_FMT_YUVJ422P:
-       case AV_PIX_FMT_YUVJ444P:
-       case AV_PIX_FMT_RGB24:
-       case AV_PIX_FMT_ARGB:
-       case AV_PIX_FMT_RGBA:
-       case AV_PIX_FMT_ABGR:
-       case AV_PIX_FMT_BGRA:
-       case AV_PIX_FMT_RGB555LE:
-               /* 8-bit */
-               for (int c = 0; c < 3; ++c) {
+       {
+               /* Y */
+               uint8_t* p = data()[0];
+               int const lines = sample_size(0).height;
+               for (int y = 0; y < lines; ++y) {
+                       uint8_t* q = p;
+                       for (int x = 0; x < line_size()[0]; ++x) {
+                               *q = int(float(*q) * f);
+                               ++q;
+                       }
+                       p += stride()[0];
+               }
+
+               /* U, V */
+               for (int c = 1; c < 3; ++c) {
                        uint8_t* p = data()[c];
                        int const lines = sample_size(c).height;
                        for (int y = 0; y < lines; ++y) {
                                uint8_t* q = p;
                                for (int x = 0; x < line_size()[c]; ++x) {
-                                       *q = int (float (*q) * f);
+                                       *q = eight_bit_uv + int((int(*q) - eight_bit_uv) * f);
                                        ++q;
                                }
                                p += stride()[c];
                        }
                }
+
                break;
+       }
+
+       case AV_PIX_FMT_RGB24:
+       {
+               /* 8-bit */
+               uint8_t* p = data()[0];
+               int const lines = sample_size(0).height;
+               for (int y = 0; y < lines; ++y) {
+                       uint8_t* q = p;
+                       for (int x = 0; x < line_size()[0]; ++x) {
+                               *q = int (float (*q) * f);
+                               ++q;
+                       }
+                       p += stride()[0];
+               }
+               break;
+       }
 
-       case AV_PIX_FMT_YUV422P9LE:
-       case AV_PIX_FMT_YUV444P9LE:
-       case AV_PIX_FMT_YUV422P10LE:
-       case AV_PIX_FMT_YUV444P10LE:
-       case AV_PIX_FMT_YUV422P16LE:
-       case AV_PIX_FMT_YUV444P16LE:
-       case AV_PIX_FMT_YUVA420P9LE:
-       case AV_PIX_FMT_YUVA422P9LE:
-       case AV_PIX_FMT_YUVA444P9LE:
-       case AV_PIX_FMT_YUVA420P10LE:
-       case AV_PIX_FMT_YUVA422P10LE:
-       case AV_PIX_FMT_YUVA444P10LE:
-       case AV_PIX_FMT_RGB48LE:
        case AV_PIX_FMT_XYZ12LE:
+       case AV_PIX_FMT_RGB48LE:
                /* 16-bit little-endian */
                for (int c = 0; c < 3; ++c) {
                        int const stride_pixels = stride()[c] / 2;
@@ -986,22 +1163,26 @@ Image::fade (float f)
                }
                break;
 
-       case AV_PIX_FMT_YUV422P9BE:
-       case AV_PIX_FMT_YUV444P9BE:
-       case AV_PIX_FMT_YUV444P10BE:
-       case AV_PIX_FMT_YUV422P10BE:
-       case AV_PIX_FMT_YUVA420P9BE:
-       case AV_PIX_FMT_YUVA422P9BE:
-       case AV_PIX_FMT_YUVA444P9BE:
-       case AV_PIX_FMT_YUVA420P10BE:
-       case AV_PIX_FMT_YUVA422P10BE:
-       case AV_PIX_FMT_YUVA444P10BE:
-       case AV_PIX_FMT_YUVA420P16BE:
-       case AV_PIX_FMT_YUVA422P16BE:
-       case AV_PIX_FMT_YUVA444P16BE:
-       case AV_PIX_FMT_RGB48BE:
-               /* 16-bit big-endian */
-               for (int c = 0; c < 3; ++c) {
+       case AV_PIX_FMT_YUV422P10LE:
+       {
+               /* Y */
+               {
+                       int const stride_pixels = stride()[0] / 2;
+                       int const line_size_pixels = line_size()[0] / 2;
+                       uint16_t* p = reinterpret_cast<uint16_t*> (data()[0]);
+                       int const lines = sample_size(0).height;
+                       for (int y = 0; y < lines; ++y) {
+                               uint16_t* q = p;
+                               for (int x = 0; x < line_size_pixels; ++x) {
+                                       *q = int(float(*q) * f);
+                                       ++q;
+                               }
+                               p += stride_pixels;
+                       }
+               }
+
+               /* U, V */
+               for (int c = 1; c < 3; ++c) {
                        int const stride_pixels = stride()[c] / 2;
                        int const line_size_pixels = line_size()[c] / 2;
                        uint16_t* p = reinterpret_cast<uint16_t*> (data()[c]);
@@ -1009,7 +1190,7 @@ Image::fade (float f)
                        for (int y = 0; y < lines; ++y) {
                                uint16_t* q = p;
                                for (int x = 0; x < line_size_pixels; ++x) {
-                                       *q = swap_16 (int (float (swap_16 (*q)) * f));
+                                       *q = ten_bit_uv + int((int(*q) - ten_bit_uv) * f);
                                        ++q;
                                }
                                p += stride_pixels;
@@ -1017,18 +1198,6 @@ Image::fade (float f)
                }
                break;
 
-       case AV_PIX_FMT_UYVY422:
-       {
-               int const Y = sample_size(0).height;
-               int const X = line_size()[0];
-               uint8_t* p = data()[0];
-               for (int y = 0; y < Y; ++y) {
-                       for (int x = 0; x < X; ++x) {
-                               *p = int (float (*p) * f);
-                               ++p;
-                       }
-               }
-               break;
        }
 
        default:
@@ -1036,8 +1205,8 @@ Image::fade (float f)
        }
 }
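For illustration only (not in the commit): the chroma fade above scales each sample's distance from the neutral value rather than the sample itself, so a fade converges to black instead of picking up a colour cast. A worked example with hypothetical numbers:

	/* Fading an 8-bit U sample of 200 by f = 0.5, with eight_bit_uv = 127 as above:
	     new U = 127 + int ((200 - 127) * 0.5) = 127 + 36 = 163
	   The previous code computed int (200 * 0.5) = 100 for every plane, which
	   pulled the chroma towards 0 (a green cast) instead of towards neutral.
	*/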
 
-shared_ptr<Image>
-Image::ensure_aligned (shared_ptr<Image> image)
+shared_ptr<const Image>
+Image::ensure_aligned (shared_ptr<const Image> image)
 {
        if (image->aligned()) {
                return image;
@@ -1045,3 +1214,110 @@ Image::ensure_aligned (shared_ptr<Image> image)
 
        return shared_ptr<Image> (new Image (image, true));
 }
+
+size_t
+Image::memory_used () const
+{
+       size_t m = 0;
+       for (int i = 0; i < planes(); ++i) {
+               m += _stride[i] * sample_size(i).height;
+       }
+       return m;
+}
+
+class Memory
+{
+public:
+       Memory ()
+               : data(0)
+               , size(0)
+       {}
+
+       ~Memory ()
+       {
+               free (data);
+       }
+
+       uint8_t* data;
+       size_t size;
+};
+
+static void
+png_write_data (png_structp png_ptr, png_bytep data, png_size_t length)
+{
+       Memory* mem = reinterpret_cast<Memory*>(png_get_io_ptr(png_ptr));
+       size_t size = mem->size + length;
+
+       if (mem->data) {
+               mem->data = reinterpret_cast<uint8_t*>(realloc(mem->data, size));
+       } else {
+               mem->data = reinterpret_cast<uint8_t*>(malloc(size));
+       }
+
+       if (!mem->data) {
+               throw EncodeError (N_("could not allocate memory for PNG"));
+       }
+
+       memcpy (mem->data + mem->size, data, length);
+       mem->size += length;
+}
+
+static void
+png_flush (png_structp)
+{
+
+}
+
+static void
+png_error_fn (png_structp png_ptr, char const * message)
+{
+       reinterpret_cast<Image*>(png_get_error_ptr(png_ptr))->png_error (message);
+}
+
+void
+Image::png_error (char const * message)
+{
+       throw EncodeError (String::compose ("Error during PNG write: %1", message));
+}
+
+dcp::Data
+Image::as_png () const
+{
+       DCPOMATIC_ASSERT (bytes_per_pixel(0) == 4);
+       DCPOMATIC_ASSERT (planes() == 1);
+       if (pixel_format() != AV_PIX_FMT_RGBA) {
+               return convert_pixel_format(dcp::YUV_TO_RGB_REC709, AV_PIX_FMT_RGBA, true, false)->as_png();
+       }
+
+       /* error handling? */
+       png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, reinterpret_cast<void*>(const_cast<Image*>(this)), png_error_fn, 0);
+       if (!png_ptr) {
+               throw EncodeError (N_("could not create PNG write struct"));
+       }
+
+       Memory state;
+
+       png_set_write_fn (png_ptr, &state, png_write_data, png_flush);
+
+       png_infop info_ptr = png_create_info_struct(png_ptr);
+       if (!info_ptr) {
+               png_destroy_write_struct (&png_ptr, &info_ptr);
+               throw EncodeError (N_("could not create PNG info struct"));
+       }
+
+       png_set_IHDR (png_ptr, info_ptr, size().width, size().height, 8, PNG_COLOR_TYPE_RGBA, PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);
+
+       png_byte ** row_pointers = reinterpret_cast<png_byte **>(png_malloc(png_ptr, size().height * sizeof(png_byte *)));
+       for (int i = 0; i < size().height; ++i) {
+               row_pointers[i] = (png_byte *) (data()[0] + i * stride()[0]);
+       }
+
+       png_write_info (png_ptr, info_ptr);
+       png_write_image (png_ptr, row_pointers);
+       png_write_end (png_ptr, info_ptr);
+
+       png_destroy_write_struct (&png_ptr, &info_ptr);
+       png_free (png_ptr, row_pointers);
+
+       return dcp::Data (state.data, state.size);
+}
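Finally, a usage sketch for the new PNG writer (editor's illustration, not part of the patch). It assumes dcp::Data exposes data() returning a boost::shared_array<uint8_t> and size() returning its length, matching the dcp::Data(uint8_t*, int) constructor used above; the function name and path handling are hypothetical.

	#include <cstdio>

	/* Write a frame to disk as a PNG. */
	static void
	write_frame_png (boost::shared_ptr<const Image> image, char const * path)
	{
		dcp::Data png = image->as_png ();
		if (FILE* f = fopen (path, "wb")) {
			fwrite (png.data().get(), 1, png.size(), f);
			fclose (f);
		}
	}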