Re-organize internal/cgen/base/image-impl.c
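
This commit also hoists the open-coded BGR_565 packing and unpacking into two
new public helpers in image-public.h,
wuffs_base__color_u32_argb_premul__as__color_u16_rgb_565 and
wuffs_base__color_u16_rgb_565__as__color_u32_argb_premul, which the BGR_565
cases in wuffs_base__pixel_buffer__color_u32_at and
wuffs_base__pixel_buffer__set_color_u32_at now call. The data.go hunks are the
generated mirror of the hand-edited base files. Below is a minimal round-trip
sketch of the two helpers: the conversion bodies are copied from the
image-public.h hunk further down, while the typedef, the sample color and the
main() harness are added here only for illustration.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t wuffs_base__color_u32_argb_premul;

    static inline uint16_t
    wuffs_base__color_u32_argb_premul__as__color_u16_rgb_565(
        wuffs_base__color_u32_argb_premul c) {
      uint32_t r5 = 0xF800 & (c >> 8);  // top 5 bits of red, at bits 11..15.
      uint32_t g6 = 0x07E0 & (c >> 5);  // top 6 bits of green, at bits 5..10.
      uint32_t b5 = 0x001F & (c >> 3);  // top 5 bits of blue, at bits 0..4.
      return (uint16_t)(r5 | g6 | b5);
    }

    static inline wuffs_base__color_u32_argb_premul
    wuffs_base__color_u16_rgb_565__as__color_u32_argb_premul(uint16_t c) {
      uint32_t b5 = 0x1F & (c >> 0);
      uint32_t b = (b5 << 3) | (b5 >> 2);  // widen 5 to 8 bits by repeating
      uint32_t g6 = 0x3F & (c >> 5);       // the high bits into the low bits.
      uint32_t g = (g6 << 2) | (g6 >> 4);
      uint32_t r5 = 0x1F & (c >> 11);
      uint32_t r = (r5 << 3) | (r5 >> 2);
      return 0xFF000000 | (r << 16) | (g << 8) | (b << 0);
    }

    int main() {
      wuffs_base__color_u32_argb_premul argb = 0xFF336699;  // arbitrary opaque color.
      uint16_t rgb565 =
          wuffs_base__color_u32_argb_premul__as__color_u16_rgb_565(argb);
      wuffs_base__color_u32_argb_premul back =
          wuffs_base__color_u16_rgb_565__as__color_u32_argb_premul(rgb565);
      printf("argb=0x%08" PRIX32 " rgb565=0x%04X back=0x%08" PRIX32 "\n", argb,
             (unsigned int)rgb565, back);
      return 0;
    }

Colors that are exactly representable in 5/6/5 survive the round trip
unchanged; any other color loses the low 3 bits of red and blue and the low 2
bits of green, exactly as the open-coded BGR_565 cases that these helpers
replace did.
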
diff --git a/internal/cgen/base/image-impl.c b/internal/cgen/base/image-impl.c
index 5921708..61ebd14 100644
--- a/internal/cgen/base/image-impl.c
+++ b/internal/cgen/base/image-impl.c
@@ -21,6 +21,52 @@
     0x08, 0x0A, 0x0C, 0x10, 0x18, 0x20, 0x30, 0x40,
 };
 
+// --------
+
+static uint64_t  //
+wuffs_base__pixel_swizzler__squash_bgr_565_888(wuffs_base__slice_u8 dst,
+                                               wuffs_base__slice_u8 src) {
+  size_t len4 = (dst.len < src.len ? dst.len : src.len) / 4;
+  uint8_t* d = dst.ptr;
+  uint8_t* s = src.ptr;
+
+  size_t n = len4;
+  while (n--) {
+    uint32_t argb = wuffs_base__load_u32le__no_bounds_check(s);
+    uint32_t b5 = 0x1F & (argb >> (8 - 5));
+    uint32_t g6 = 0x3F & (argb >> (16 - 6));
+    uint32_t r5 = 0x1F & (argb >> (24 - 5));
+    wuffs_base__store_u32le__no_bounds_check(
+        d, (r5 << 11) | (g6 << 5) | (b5 << 0));
+    s += 4;
+    d += 4;
+  }
+  return len4 * 4;
+}
+
+static uint64_t  //
+wuffs_base__pixel_swizzler__swap_rgbx_bgrx(wuffs_base__slice_u8 dst,
+                                           wuffs_base__slice_u8 src) {
+  size_t len4 = (dst.len < src.len ? dst.len : src.len) / 4;
+  uint8_t* d = dst.ptr;
+  uint8_t* s = src.ptr;
+
+  size_t n = len4;
+  while (n--) {
+    uint8_t b0 = s[0];
+    uint8_t b1 = s[1];
+    uint8_t b2 = s[2];
+    uint8_t b3 = s[3];
+    d[0] = b2;
+    d[1] = b1;
+    d[2] = b0;
+    d[3] = b3;
+    s += 4;
+    d += 4;
+  }
+  return len4 * 4;
+}
+
 static inline uint32_t  //
 wuffs_base__swap_u32_argb_abgr(uint32_t u) {
   uint32_t o = u & 0xFF00FF00;
@@ -29,6 +75,8 @@
   return o | (r >> 16) | (b << 16);
 }
 
+// --------
+
 static inline uint32_t  //
 wuffs_base__composite_nonpremul_nonpremul_u32_axxx(uint32_t dst_nonpremul,
                                                    uint32_t src_nonpremul) {
@@ -173,6 +221,8 @@
   return (db << 0) | (dg << 8) | (dr << 16) | (da << 24);
 }
 
+// --------
+
 wuffs_base__color_u32_argb_premul  //
 wuffs_base__pixel_buffer__color_u32_at(const wuffs_base__pixel_buffer* pb,
                                        uint32_t x,
@@ -214,17 +264,9 @@
                                                   (4 * ((size_t)row[x]))));
     }
 
-    case WUFFS_BASE__PIXEL_FORMAT__BGR_565: {
-      uint16_t bgr =
-          wuffs_base__load_u16le__no_bounds_check(row + (2 * ((size_t)x)));
-      uint32_t b5 = 0x1F & (bgr >> 0);
-      uint32_t b = (b5 << 3) | (b5 >> 2);
-      uint32_t g6 = 0x3F & (bgr >> 5);
-      uint32_t g = (g6 << 2) | (g6 >> 4);
-      uint32_t r5 = 0x1F & (bgr >> 11);
-      uint32_t r = (r5 << 3) | (r5 >> 2);
-      return 0xFF000000 | (r << 16) | (g << 8) | (b << 0);
-    }
+    case WUFFS_BASE__PIXEL_FORMAT__BGR_565:
+      return wuffs_base__color_u16_rgb_565__as__color_u32_argb_premul(
+          wuffs_base__load_u16le__no_bounds_check(row + (2 * ((size_t)x))));
     case WUFFS_BASE__PIXEL_FORMAT__BGR:
       return 0xFF000000 |
              wuffs_base__load_u24le__no_bounds_check(row + (3 * ((size_t)x)));
@@ -301,15 +343,11 @@
                                  pb->pixcfg.private_impl.pixfmt, color));
       break;
 
-    case WUFFS_BASE__PIXEL_FORMAT__BGR_565: {
-      uint32_t b5 = 0x1F & (color >> (8 - 5));
-      uint32_t g6 = 0x3F & (color >> (16 - 6));
-      uint32_t r5 = 0x1F & (color >> (24 - 5));
-      uint32_t bgr565 = (b5 << 0) | (g6 << 5) | (r5 << 11);
-      wuffs_base__store_u16le__no_bounds_check(row + (2 * ((size_t)x)),
-                                               (uint16_t)bgr565);
+    case WUFFS_BASE__PIXEL_FORMAT__BGR_565:
+      wuffs_base__store_u16le__no_bounds_check(
+          row + (2 * ((size_t)x)),
+          wuffs_base__color_u32_argb_premul__as__color_u16_rgb_565(color));
       break;
-    }
     case WUFFS_BASE__PIXEL_FORMAT__BGR:
       wuffs_base__store_u24le__no_bounds_check(row + (3 * ((size_t)x)), color);
       break;
@@ -328,7 +366,6 @@
                                        wuffs_base__swap_u32_argb_abgr(color)));
       break;
     case WUFFS_BASE__PIXEL_FORMAT__RGBA_PREMUL:
-      WUFFS_BASE__FALLTHROUGH;
     case WUFFS_BASE__PIXEL_FORMAT__RGBX:
       wuffs_base__store_u32le__no_bounds_check(
           row + (4 * ((size_t)x)), wuffs_base__swap_u32_argb_abgr(color));
@@ -401,96 +438,12 @@
     }
   }
 
-  return best_index;
+  return (uint8_t)best_index;
 }
 
 // --------
 
 static uint64_t  //
-wuffs_base__pixel_swizzler__bgra_nonpremul__bgra_nonpremul__src_over(
-    wuffs_base__slice_u8 dst,
-    wuffs_base__slice_u8 dst_palette,
-    wuffs_base__slice_u8 src) {
-  size_t dst_len4 = dst.len / 4;
-  size_t src_len4 = src.len / 4;
-  size_t len = dst_len4 < src_len4 ? dst_len4 : src_len4;
-  uint8_t* d = dst.ptr;
-  uint8_t* s = src.ptr;
-  size_t n = len;
-
-  // TODO: unroll.
-
-  while (n >= 1) {
-    uint32_t d0 = wuffs_base__load_u32le__no_bounds_check(d + (0 * 4));
-    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(s + (0 * 4));
-    wuffs_base__store_u32le__no_bounds_check(
-        d + (0 * 4),
-        wuffs_base__composite_nonpremul_nonpremul_u32_axxx(d0, s0));
-
-    s += 1 * 4;
-    d += 1 * 4;
-    n -= 1;
-  }
-
-  return len;
-}
-
-static uint64_t  //
-wuffs_base__pixel_swizzler__bgra_premul__bgra_nonpremul__src(
-    wuffs_base__slice_u8 dst,
-    wuffs_base__slice_u8 dst_palette,
-    wuffs_base__slice_u8 src) {
-  size_t dst_len4 = dst.len / 4;
-  size_t src_len4 = src.len / 4;
-  size_t len = dst_len4 < src_len4 ? dst_len4 : src_len4;
-  uint8_t* d = dst.ptr;
-  uint8_t* s = src.ptr;
-  size_t n = len;
-
-  // TODO: unroll.
-
-  while (n >= 1) {
-    uint32_t s0 = wuffs_base__premul_u32_axxx(
-        wuffs_base__load_u32le__no_bounds_check(s + (0 * 4)));
-    wuffs_base__store_u32le__no_bounds_check(d + (0 * 4), s0);
-
-    s += 1 * 4;
-    d += 1 * 4;
-    n -= 1;
-  }
-
-  return len;
-}
-
-static uint64_t  //
-wuffs_base__pixel_swizzler__bgra_premul__bgra_nonpremul__src_over(
-    wuffs_base__slice_u8 dst,
-    wuffs_base__slice_u8 dst_palette,
-    wuffs_base__slice_u8 src) {
-  size_t dst_len4 = dst.len / 4;
-  size_t src_len4 = src.len / 4;
-  size_t len = dst_len4 < src_len4 ? dst_len4 : src_len4;
-  uint8_t* d = dst.ptr;
-  uint8_t* s = src.ptr;
-  size_t n = len;
-
-  // TODO: unroll.
-
-  while (n >= 1) {
-    uint32_t d0 = wuffs_base__load_u32le__no_bounds_check(d + (0 * 4));
-    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(s + (0 * 4));
-    wuffs_base__store_u32le__no_bounds_check(
-        d + (0 * 4), wuffs_base__composite_premul_nonpremul_u32_axxx(d0, s0));
-
-    s += 1 * 4;
-    d += 1 * 4;
-    n -= 1;
-  }
-
-  return len;
-}
-
-static uint64_t  //
 wuffs_base__pixel_swizzler__copy_1_1(wuffs_base__slice_u8 dst,
                                      wuffs_base__slice_u8 dst_palette,
                                      wuffs_base__slice_u8 src) {
@@ -510,10 +463,13 @@
   return len;
 }
 
+// --------
+
 static uint64_t  //
-wuffs_base__pixel_swizzler__xx__index__src(wuffs_base__slice_u8 dst,
-                                           wuffs_base__slice_u8 dst_palette,
-                                           wuffs_base__slice_u8 src) {
+wuffs_base__pixel_swizzler__bgr_565__index__src(
+    wuffs_base__slice_u8 dst,
+    wuffs_base__slice_u8 dst_palette,
+    wuffs_base__slice_u8 src) {
   if (dst_palette.len != 1024) {
     return 0;
   }
@@ -557,6 +513,96 @@
   return len;
 }
 
+// --------
+
+static uint64_t  //
+wuffs_base__pixel_swizzler__bgra_nonpremul__bgra_nonpremul__src_over(
+    wuffs_base__slice_u8 dst,
+    wuffs_base__slice_u8 dst_palette,
+    wuffs_base__slice_u8 src) {
+  size_t dst_len4 = dst.len / 4;
+  size_t src_len4 = src.len / 4;
+  size_t len = dst_len4 < src_len4 ? dst_len4 : src_len4;
+  uint8_t* d = dst.ptr;
+  uint8_t* s = src.ptr;
+  size_t n = len;
+
+  // TODO: unroll.
+
+  while (n >= 1) {
+    uint32_t d0 = wuffs_base__load_u32le__no_bounds_check(d + (0 * 4));
+    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(s + (0 * 4));
+    wuffs_base__store_u32le__no_bounds_check(
+        d + (0 * 4),
+        wuffs_base__composite_nonpremul_nonpremul_u32_axxx(d0, s0));
+
+    s += 1 * 4;
+    d += 1 * 4;
+    n -= 1;
+  }
+
+  return len;
+}
+
+// --------
+
+static uint64_t  //
+wuffs_base__pixel_swizzler__bgra_premul__bgra_nonpremul__src(
+    wuffs_base__slice_u8 dst,
+    wuffs_base__slice_u8 dst_palette,
+    wuffs_base__slice_u8 src) {
+  size_t dst_len4 = dst.len / 4;
+  size_t src_len4 = src.len / 4;
+  size_t len = dst_len4 < src_len4 ? dst_len4 : src_len4;
+  uint8_t* d = dst.ptr;
+  uint8_t* s = src.ptr;
+  size_t n = len;
+
+  // TODO: unroll.
+
+  while (n >= 1) {
+    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(s + (0 * 4));
+    wuffs_base__store_u32le__no_bounds_check(d + (0 * 4),
+                                             wuffs_base__premul_u32_axxx(s0));
+
+    s += 1 * 4;
+    d += 1 * 4;
+    n -= 1;
+  }
+
+  return len;
+}
+
+static uint64_t  //
+wuffs_base__pixel_swizzler__bgra_premul__bgra_nonpremul__src_over(
+    wuffs_base__slice_u8 dst,
+    wuffs_base__slice_u8 dst_palette,
+    wuffs_base__slice_u8 src) {
+  size_t dst_len4 = dst.len / 4;
+  size_t src_len4 = src.len / 4;
+  size_t len = dst_len4 < src_len4 ? dst_len4 : src_len4;
+  uint8_t* d = dst.ptr;
+  uint8_t* s = src.ptr;
+  size_t n = len;
+
+  // TODO: unroll.
+
+  while (n >= 1) {
+    uint32_t d0 = wuffs_base__load_u32le__no_bounds_check(d + (0 * 4));
+    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(s + (0 * 4));
+    wuffs_base__store_u32le__no_bounds_check(
+        d + (0 * 4), wuffs_base__composite_premul_nonpremul_u32_axxx(d0, s0));
+
+    s += 1 * 4;
+    d += 1 * 4;
+    n -= 1;
+  }
+
+  return len;
+}
+
+// --------
+
 static uint64_t  //
 wuffs_base__pixel_swizzler__xxx__index__src(wuffs_base__slice_u8 dst,
                                             wuffs_base__slice_u8 dst_palette,
@@ -572,12 +618,13 @@
 
   const size_t loop_unroll_count = 4;
 
-  // The comparison in the while condition is ">", not ">=", because with ">=",
-  // the last 4-byte store could write past the end of the dst slice.
+  // The comparison in the while condition is ">", not ">=", because with
+  // ">=", the last 4-byte store could write past the end of the dst slice.
   //
-  // Each 4-byte store writes one too many bytes, but a subsequent store will
-  // overwrite that with the correct byte. There is always another store,
-  // whether a 4-byte store in this loop or a 1-byte store in the next loop.
+  // Each 4-byte store writes one too many bytes, but a subsequent store
+  // will overwrite that with the correct byte. There is always another
+  // store, whether a 4-byte store in this loop or a 1-byte store in the
+  // next loop.
   while (n > loop_unroll_count) {
     wuffs_base__store_u32le__no_bounds_check(
         d + (0 * 3), wuffs_base__load_u32le__no_bounds_check(
@@ -600,9 +647,7 @@
   while (n >= 1) {
     uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(dst_palette.ptr +
                                                           ((size_t)s[0] * 4));
-    d[0] = (uint8_t)(s0 >> 0);
-    d[1] = (uint8_t)(s0 >> 8);
-    d[2] = (uint8_t)(s0 >> 16);
+    wuffs_base__store_u24le__no_bounds_check(d + (0 * 3), s0);
 
     s += 1 * 1;
     d += 1 * 3;
@@ -670,6 +715,8 @@
   return len;
 }
 
+// --------
+
 static uint64_t  //
 wuffs_base__pixel_swizzler__xxxx__index__src(wuffs_base__slice_u8 dst,
                                              wuffs_base__slice_u8 dst_palette,
@@ -827,52 +874,6 @@
 
 // --------
 
-static uint64_t  //
-wuffs_base__pixel_swizzler__squash_bgr_565_888(wuffs_base__slice_u8 dst,
-                                               wuffs_base__slice_u8 src) {
-  size_t len4 = (dst.len < src.len ? dst.len : src.len) / 4;
-  uint8_t* d = dst.ptr;
-  uint8_t* s = src.ptr;
-
-  size_t n = len4;
-  while (n--) {
-    uint32_t argb = wuffs_base__load_u32le__no_bounds_check(s);
-    uint32_t b5 = 0x1F & (argb >> (8 - 5));
-    uint32_t g6 = 0x3F & (argb >> (16 - 6));
-    uint32_t r5 = 0x1F & (argb >> (24 - 5));
-    wuffs_base__store_u32le__no_bounds_check(
-        d, (b5 << 0) | (g6 << 5) | (r5 << 11));
-    s += 4;
-    d += 4;
-  }
-  return len4 * 4;
-}
-
-static uint64_t  //
-wuffs_base__pixel_swizzler__swap_rgbx_bgrx(wuffs_base__slice_u8 dst,
-                                           wuffs_base__slice_u8 src) {
-  size_t len4 = (dst.len < src.len ? dst.len : src.len) / 4;
-  uint8_t* d = dst.ptr;
-  uint8_t* s = src.ptr;
-
-  size_t n = len4;
-  while (n--) {
-    uint8_t b0 = s[0];
-    uint8_t b1 = s[1];
-    uint8_t b2 = s[2];
-    uint8_t b3 = s[3];
-    d[0] = b2;
-    d[1] = b1;
-    d[2] = b0;
-    d[3] = b3;
-    s += 4;
-    d += 4;
-  }
-  return len4 * 4;
-}
-
-// --------
-
 static wuffs_base__pixel_swizzler__func  //
 wuffs_base__pixel_swizzler__prepare__y(wuffs_base__pixel_swizzler* p,
                                        wuffs_base__pixel_format dst_format,
@@ -930,7 +931,7 @@
       }
       switch (blend) {
         case WUFFS_BASE__PIXEL_BLEND__SRC:
-          return wuffs_base__pixel_swizzler__xx__index__src;
+          return wuffs_base__pixel_swizzler__bgr_565__index__src;
       }
       return NULL;
 
@@ -1005,7 +1006,6 @@
       break;
 
     case WUFFS_BASE__PIXEL_FORMAT__BGR:
-    case WUFFS_BASE__PIXEL_FORMAT__RGB:
       // TODO.
       break;
 
@@ -1015,6 +1015,7 @@
     case WUFFS_BASE__PIXEL_FORMAT__BGRX:
       return wuffs_base__pixel_swizzler__xxxx__xxx;
 
+    case WUFFS_BASE__PIXEL_FORMAT__RGB:
     case WUFFS_BASE__PIXEL_FORMAT__RGBA_NONPREMUL:
     case WUFFS_BASE__PIXEL_FORMAT__RGBA_PREMUL:
     case WUFFS_BASE__PIXEL_FORMAT__RGBA_BINARY:
@@ -1038,12 +1039,10 @@
       break;
 
     case WUFFS_BASE__PIXEL_FORMAT__BGR:
-    case WUFFS_BASE__PIXEL_FORMAT__RGB:
       // TODO.
       break;
 
     case WUFFS_BASE__PIXEL_FORMAT__BGRA_NONPREMUL:
-    case WUFFS_BASE__PIXEL_FORMAT__BGRX:
       switch (blend) {
         case WUFFS_BASE__PIXEL_BLEND__SRC:
           return wuffs_base__pixel_swizzler__copy_4_4;
@@ -1062,9 +1061,11 @@
       return NULL;
 
     case WUFFS_BASE__PIXEL_FORMAT__BGRA_BINARY:
+    case WUFFS_BASE__PIXEL_FORMAT__BGRX:
       // TODO.
       break;
 
+    case WUFFS_BASE__PIXEL_FORMAT__RGB:
     case WUFFS_BASE__PIXEL_FORMAT__RGBA_NONPREMUL:
     case WUFFS_BASE__PIXEL_FORMAT__RGBA_PREMUL:
     case WUFFS_BASE__PIXEL_FORMAT__RGBA_BINARY:
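
The rename of wuffs_base__pixel_swizzler__xx__index__src to
wuffs_base__pixel_swizzler__bgr_565__index__src above makes the destination
format explicit: the swizzler reads 16-bit entries straight out of
dst_palette, which presumably has been pre-processed by
wuffs_base__pixel_swizzler__squash_bgr_565_888 (now grouped at the top of the
file; the call site is outside this diff). That squash step rewrites each
4-byte B,G,R,A palette entry so that its low two bytes hold the packed BGR_565
value, still at 4 bytes per entry. A standalone sketch of the same per-entry
math on a two-entry palette; the slice struct, the load/store helpers and the
sample data are stand-ins added for this example, not wuffs-base API.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    // Minimal stand-ins for wuffs-base's slice type and unaligned accessors,
    // only for this sketch.
    typedef struct {
      uint8_t* ptr;
      size_t len;
    } slice_u8;

    static uint32_t load_u32le(const uint8_t* p) {
      return (uint32_t)p[0] | ((uint32_t)p[1] << 8) | ((uint32_t)p[2] << 16) |
             ((uint32_t)p[3] << 24);
    }

    static void store_u32le(uint8_t* p, uint32_t x) {
      p[0] = (uint8_t)(x >> 0);
      p[1] = (uint8_t)(x >> 8);
      p[2] = (uint8_t)(x >> 16);
      p[3] = (uint8_t)(x >> 24);
    }

    // Same per-entry math as wuffs_base__pixel_swizzler__squash_bgr_565_888:
    // each 4-byte B,G,R,A entry becomes its BGR_565 packing in the low two
    // bytes (the high two bytes become zero), still 4 bytes per entry.
    static uint64_t squash_bgr_565_888(slice_u8 dst, slice_u8 src) {
      size_t n = (dst.len < src.len ? dst.len : src.len) / 4;
      uint8_t* d = dst.ptr;
      uint8_t* s = src.ptr;
      for (size_t i = 0; i < n; i++) {
        uint32_t argb = load_u32le(s);
        uint32_t b5 = 0x1F & (argb >> (8 - 5));
        uint32_t g6 = 0x3F & (argb >> (16 - 6));
        uint32_t r5 = 0x1F & (argb >> (24 - 5));
        store_u32le(d, (r5 << 11) | (g6 << 5) | (b5 << 0));
        s += 4;
        d += 4;
      }
      return (uint64_t)n * 4;
    }

    int main() {
      // Two B,G,R,A palette entries: opaque red and opaque green.
      uint8_t palette[8] = {0x00, 0x00, 0xFF, 0xFF,   // red
                            0x00, 0xFF, 0x00, 0xFF};  // green
      slice_u8 p = {palette, sizeof(palette)};
      squash_bgr_565_888(p, p);  // in place here, for brevity.
      // Expect 0xF800 (red) and 0x07E0 (green).
      printf("0x%04X 0x%04X\n", (unsigned)(palette[0] | (palette[1] << 8)),
             (unsigned)(palette[4] | (palette[5] << 8)));
      return 0;
    }
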
diff --git a/internal/cgen/base/image-public.h b/internal/cgen/base/image-public.h
index b235f2a..487f74a 100644
--- a/internal/cgen/base/image-public.h
+++ b/internal/cgen/base/image-public.h
@@ -21,6 +21,26 @@
 // 0xAARRGGBB (Alpha most significant, Blue least), regardless of endianness.
 typedef uint32_t wuffs_base__color_u32_argb_premul;
 
+static inline uint16_t  //
+wuffs_base__color_u32_argb_premul__as__color_u16_rgb_565(
+    wuffs_base__color_u32_argb_premul c) {
+  uint32_t r5 = 0xF800 & (c >> 8);
+  uint32_t g6 = 0x07E0 & (c >> 5);
+  uint32_t b5 = 0x001F & (c >> 3);
+  return (uint16_t)(r5 | g6 | b5);
+}
+
+static inline wuffs_base__color_u32_argb_premul  //
+wuffs_base__color_u16_rgb_565__as__color_u32_argb_premul(uint16_t c) {
+  uint32_t b5 = 0x1F & (c >> 0);
+  uint32_t b = (b5 << 3) | (b5 >> 2);
+  uint32_t g6 = 0x3F & (c >> 5);
+  uint32_t g = (g6 << 2) | (g6 >> 4);
+  uint32_t r5 = 0x1F & (c >> 11);
+  uint32_t r = (r5 << 3) | (r5 >> 2);
+  return 0xFF000000 | (r << 16) | (g << 8) | (b << 0);
+}
+
 static inline uint8_t  //
 wuffs_base__color_u32_argb_premul__as_gray(
     wuffs_base__color_u32_argb_premul c) {
@@ -35,7 +55,8 @@
   // Note that 19595 + 38470 + 7471 equals 65536, also known as (1 << 16). We
   // shift by 24, not just by 16, because the return value is 8-bit color, not
   // 16-bit color.
-  return ((19595 * cr) + (38470 * cg) + (7471 * cb) + 32768) >> 24;
+  uint32_t weighted_average = (19595 * cr) + (38470 * cg) + (7471 * cb) + 32768;
+  return (uint8_t)(weighted_average >> 24);
 }
 
 // wuffs_base__premul_u32_axxx converts from non-premultiplied alpha to
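
The remaining image-public.h tweak only makes the uint32_t-to-uint8_t
narrowing in wuffs_base__color_u32_argb_premul__as_gray explicit; the
arithmetic is unchanged. As the comment above notes, the three weights (the
usual 0.299 / 0.587 / 0.114 luma coefficients scaled by 65536) sum to 65536,
and the +32768 term rounds to nearest. A small check follows, in which the
channel-expansion lines are reconstructed from the file's usual 0x101
"work in 16-bit color" idiom (they sit just above the hunk and are not shown
in the diff).

    #include <stdint.h>
    #include <stdio.h>

    // Mirrors wuffs_base__color_u32_argb_premul__as_gray: weight the 16-bit
    // channels, round, then shift by 24 to get back to 8-bit gray.
    static uint8_t as_gray(uint32_t c) {
      uint32_t cr = 0x101 * (0xFF & (c >> 16));  // expand 8-bit red to 16-bit.
      uint32_t cg = 0x101 * (0xFF & (c >> 8));
      uint32_t cb = 0x101 * (0xFF & (c >> 0));
      uint32_t weighted_average =
          (19595 * cr) + (38470 * cg) + (7471 * cb) + 32768;
      return (uint8_t)(weighted_average >> 24);
    }

    int main() {
      printf("0x%02X\n", (unsigned)as_gray(0xFF808080));
      return 0;
    }

A mid-gray input therefore maps back to itself: 0xFF808080 prints 0x80.
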
diff --git a/internal/cgen/data.go b/internal/cgen/data.go
index 6f49c3f..358f99e 100644
--- a/internal/cgen/data.go
+++ b/internal/cgen/data.go
@@ -27,46 +27,55 @@
 	""
 
 const baseImageImplC = "" +
-	"// ---------------- Images\n\nconst uint32_t wuffs_base__pixel_format__bits_per_channel[16] = {\n    0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,\n    0x08, 0x0A, 0x0C, 0x10, 0x18, 0x20, 0x30, 0x40,\n};\n\nstatic inline uint32_t  //\nwuffs_base__swap_u32_argb_abgr(uint32_t u) {\n  uint32_t o = u & 0xFF00FF00;\n  uint32_t r = u & 0x00FF0000;\n  uint32_t b = u & 0x000000FF;\n  return o | (r >> 16) | (b << 16);\n}\n\nstatic inline uint32_t  //\nwuffs_base__composite_nonpremul_nonpremul_u32_axxx(uint32_t dst_nonpremul,\n                                                   uint32_t src_nonpremul) {\n  // Convert from 8-bit color to 16-bit color.\n  uint32_t sa = 0x101 * (0xFF & (src_nonpremul >> 24));\n  uint32_t sr = 0x101 * (0xFF & (src_nonpremul >> 16));\n  uint32_t sg = 0x101 * (0xFF & (src_nonpremul >> 8));\n  uint32_t sb = 0x101 * (0xFF & (src_nonpremul >> 0));\n  uint32_t da = 0x101 * (0xFF & (dst_nonpremul >> 24));\n  uint32_t dr = 0x101 * (0xFF & (dst_nonpremul >> 16));\n  uint32_t dg = 0x101 * (0xFF & (dst_nonpremul >> 8));\n  u" +
-	"int32_t db = 0x101 * (0xFF & (dst_nonpremul >> 0));\n\n  // Convert dst from nonpremul to premul.\n  dr = (dr * da) / 0xFFFF;\n  dg = (dg * da) / 0xFFFF;\n  db = (db * da) / 0xFFFF;\n\n  // Calculate the inverse of the src-alpha: how much of the dst to keep.\n  uint32_t ia = 0xFFFF - sa;\n\n  // Composite src (nonpremul) over dst (premul).\n  da = sa + ((da * ia) / 0xFFFF);\n  dr = ((sr * sa) + (dr * ia)) / 0xFFFF;\n  dg = ((sg * sa) + (dg * ia)) / 0xFFFF;\n  db = ((sb * sa) + (db * ia)) / 0xFFFF;\n\n  // Convert dst from premul to nonpremul.\n  if (da != 0) {\n    dr = (dr * 0xFFFF) / da;\n    dg = (dg * 0xFFFF) / da;\n    db = (db * 0xFFFF) / da;\n  }\n\n  // Convert from 16-bit color to 8-bit color and combine the components.\n  da >>= 8;\n  dr >>= 8;\n  dg >>= 8;\n  db >>= 8;\n  return (db << 0) | (dg << 8) | (dr << 16) | (da << 24);\n}\n\nstatic inline uint32_t  //\nwuffs_base__composite_nonpremul_premul_u32_axxx(uint32_t dst_nonpremul,\n                                                uint32_t src_premul) {\n  // Convert from 8-bit color" +
-	" to 16-bit color.\n  uint32_t sa = 0x101 * (0xFF & (src_premul >> 24));\n  uint32_t sr = 0x101 * (0xFF & (src_premul >> 16));\n  uint32_t sg = 0x101 * (0xFF & (src_premul >> 8));\n  uint32_t sb = 0x101 * (0xFF & (src_premul >> 0));\n  uint32_t da = 0x101 * (0xFF & (dst_nonpremul >> 24));\n  uint32_t dr = 0x101 * (0xFF & (dst_nonpremul >> 16));\n  uint32_t dg = 0x101 * (0xFF & (dst_nonpremul >> 8));\n  uint32_t db = 0x101 * (0xFF & (dst_nonpremul >> 0));\n\n  // Convert dst from nonpremul to premul.\n  dr = (dr * da) / 0xFFFF;\n  dg = (dg * da) / 0xFFFF;\n  db = (db * da) / 0xFFFF;\n\n  // Calculate the inverse of the src-alpha: how much of the dst to keep.\n  uint32_t ia = 0xFFFF - sa;\n\n  // Composite src (premul) over dst (premul).\n  da = sa + ((da * ia) / 0xFFFF);\n  dr = sr + ((dr * ia) / 0xFFFF);\n  dg = sg + ((dg * ia) / 0xFFFF);\n  db = sb + ((db * ia) / 0xFFFF);\n\n  // Convert dst from premul to nonpremul.\n  if (da != 0) {\n    dr = (dr * 0xFFFF) / da;\n    dg = (dg * 0xFFFF) / da;\n    db = (db * 0xFFFF) / da;\n  }\n\n  // Con" +
-	"vert from 16-bit color to 8-bit color and combine the components.\n  da >>= 8;\n  dr >>= 8;\n  dg >>= 8;\n  db >>= 8;\n  return (db << 0) | (dg << 8) | (dr << 16) | (da << 24);\n}\n\nstatic inline uint32_t  //\nwuffs_base__composite_premul_nonpremul_u32_axxx(uint32_t dst_premul,\n                                                uint32_t src_nonpremul) {\n  // Convert from 8-bit color to 16-bit color.\n  uint32_t sa = 0x101 * (0xFF & (src_nonpremul >> 24));\n  uint32_t sr = 0x101 * (0xFF & (src_nonpremul >> 16));\n  uint32_t sg = 0x101 * (0xFF & (src_nonpremul >> 8));\n  uint32_t sb = 0x101 * (0xFF & (src_nonpremul >> 0));\n  uint32_t da = 0x101 * (0xFF & (dst_premul >> 24));\n  uint32_t dr = 0x101 * (0xFF & (dst_premul >> 16));\n  uint32_t dg = 0x101 * (0xFF & (dst_premul >> 8));\n  uint32_t db = 0x101 * (0xFF & (dst_premul >> 0));\n\n  // Calculate the inverse of the src-alpha: how much of the dst to keep.\n  uint32_t ia = 0xFFFF - sa;\n\n  // Composite src (nonpremul) over dst (premul).\n  da = sa + ((da * ia) / 0xFFFF);\n  dr = ((sr" +
-	" * sa) + (dr * ia)) / 0xFFFF;\n  dg = ((sg * sa) + (dg * ia)) / 0xFFFF;\n  db = ((sb * sa) + (db * ia)) / 0xFFFF;\n\n  // Convert from 16-bit color to 8-bit color and combine the components.\n  da >>= 8;\n  dr >>= 8;\n  dg >>= 8;\n  db >>= 8;\n  return (db << 0) | (dg << 8) | (dr << 16) | (da << 24);\n}\n\nstatic inline uint32_t  //\nwuffs_base__composite_premul_premul_u32_axxx(uint32_t dst_premul,\n                                             uint32_t src_premul) {\n  // Convert from 8-bit color to 16-bit color.\n  uint32_t sa = 0x101 * (0xFF & (src_premul >> 24));\n  uint32_t sr = 0x101 * (0xFF & (src_premul >> 16));\n  uint32_t sg = 0x101 * (0xFF & (src_premul >> 8));\n  uint32_t sb = 0x101 * (0xFF & (src_premul >> 0));\n  uint32_t da = 0x101 * (0xFF & (dst_premul >> 24));\n  uint32_t dr = 0x101 * (0xFF & (dst_premul >> 16));\n  uint32_t dg = 0x101 * (0xFF & (dst_premul >> 8));\n  uint32_t db = 0x101 * (0xFF & (dst_premul >> 0));\n\n  // Calculate the inverse of the src-alpha: how much of the dst to keep.\n  uint32_t ia = 0xFFFF - " +
-	"sa;\n\n  // Composite src (premul) over dst (premul).\n  da = sa + ((da * ia) / 0xFFFF);\n  dr = sr + ((dr * ia) / 0xFFFF);\n  dg = sg + ((dg * ia) / 0xFFFF);\n  db = sb + ((db * ia) / 0xFFFF);\n\n  // Convert from 16-bit color to 8-bit color and combine the components.\n  da >>= 8;\n  dr >>= 8;\n  dg >>= 8;\n  db >>= 8;\n  return (db << 0) | (dg << 8) | (dr << 16) | (da << 24);\n}\n\nwuffs_base__color_u32_argb_premul  //\nwuffs_base__pixel_buffer__color_u32_at(const wuffs_base__pixel_buffer* pb,\n                                       uint32_t x,\n                                       uint32_t y) {\n  if (!pb || (x >= pb->pixcfg.private_impl.width) ||\n      (y >= pb->pixcfg.private_impl.height)) {\n    return 0;\n  }\n\n  if (wuffs_base__pixel_format__is_planar(&pb->pixcfg.private_impl.pixfmt)) {\n    // TODO: support planar formats.\n    return 0;\n  }\n\n  size_t stride = pb->private_impl.planes[0].stride;\n  uint8_t* row = pb->private_impl.planes[0].ptr + (stride * ((size_t)y));\n\n  switch (pb->pixcfg.private_impl.pixfmt.repr) {\n    c" +
-	"ase WUFFS_BASE__PIXEL_FORMAT__BGRA_PREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_BINARY:\n      return wuffs_base__load_u32le__no_bounds_check(row + (4 * ((size_t)x)));\n\n    case WUFFS_BASE__PIXEL_FORMAT__INDEXED__BGRA_PREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__INDEXED__BGRA_BINARY: {\n      uint8_t* palette = pb->private_impl.planes[3].ptr;\n      return wuffs_base__load_u32le__no_bounds_check(palette +\n                                                     (4 * ((size_t)row[x])));\n    }\n\n      // Common formats above. Rarer formats below.\n\n    case WUFFS_BASE__PIXEL_FORMAT__Y:\n      return 0xFF000000 | (0x00010101 * ((uint32_t)(row[x])));\n\n    case WUFFS_BASE__PIXEL_FORMAT__INDEXED__BGRA_NONPREMUL: {\n      uint8_t* palette = pb->private_impl.planes[3].ptr;\n      return wuffs_base__premul_u32_axxx(\n          wuffs_base__load_u32le__no_bounds_check(palette +\n                                                  (4 * ((size_t)row[x]))));\n    }\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGR_565: {\n      uint16_t bgr =\n      " +
-	"    wuffs_base__load_u16le__no_bounds_check(row + (2 * ((size_t)x)));\n      uint32_t b5 = 0x1F & (bgr >> 0);\n      uint32_t b = (b5 << 3) | (b5 >> 2);\n      uint32_t g6 = 0x3F & (bgr >> 5);\n      uint32_t g = (g6 << 2) | (g6 >> 4);\n      uint32_t r5 = 0x1F & (bgr >> 11);\n      uint32_t r = (r5 << 3) | (r5 >> 2);\n      return 0xFF000000 | (r << 16) | (g << 8) | (b << 0);\n    }\n    case WUFFS_BASE__PIXEL_FORMAT__BGR:\n      return 0xFF000000 |\n             wuffs_base__load_u24le__no_bounds_check(row + (3 * ((size_t)x)));\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_NONPREMUL:\n      return wuffs_base__premul_u32_axxx(\n          wuffs_base__load_u32le__no_bounds_check(row + (4 * ((size_t)x))));\n    case WUFFS_BASE__PIXEL_FORMAT__BGRX:\n      return 0xFF000000 |\n             wuffs_base__load_u32le__no_bounds_check(row + (4 * ((size_t)x)));\n\n    case WUFFS_BASE__PIXEL_FORMAT__RGB:\n      return wuffs_base__swap_u32_argb_abgr(\n          0xFF000000 |\n          wuffs_base__load_u24le__no_bounds_check(row + (3 * ((size_t)x))))" +
-	";\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_NONPREMUL:\n      return wuffs_base__swap_u32_argb_abgr(wuffs_base__premul_u32_axxx(\n          wuffs_base__load_u32le__no_bounds_check(row + (4 * ((size_t)x)))));\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_PREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_BINARY:\n      return wuffs_base__swap_u32_argb_abgr(\n          wuffs_base__load_u32le__no_bounds_check(row + (4 * ((size_t)x))));\n    case WUFFS_BASE__PIXEL_FORMAT__RGBX:\n      return wuffs_base__swap_u32_argb_abgr(\n          0xFF000000 |\n          wuffs_base__load_u32le__no_bounds_check(row + (4 * ((size_t)x))));\n\n    default:\n      // TODO: support more formats.\n      break;\n  }\n\n  return 0;\n}\n\nwuffs_base__status  //\nwuffs_base__pixel_buffer__set_color_u32_at(\n    wuffs_base__pixel_buffer* pb,\n    uint32_t x,\n    uint32_t y,\n    wuffs_base__color_u32_argb_premul color) {\n  if (!pb) {\n    return wuffs_base__make_status(wuffs_base__error__bad_receiver);\n  }\n  if ((x >= pb->pixcfg.private_impl.width) ||\n      (y >= pb->pixc" +
-	"fg.private_impl.height)) {\n    return wuffs_base__make_status(wuffs_base__error__bad_argument);\n  }\n\n  if (wuffs_base__pixel_format__is_planar(&pb->pixcfg.private_impl.pixfmt)) {\n    // TODO: support planar formats.\n    return wuffs_base__make_status(wuffs_base__error__unsupported_option);\n  }\n\n  size_t stride = pb->private_impl.planes[0].stride;\n  uint8_t* row = pb->private_impl.planes[0].ptr + (stride * ((size_t)y));\n\n  switch (pb->pixcfg.private_impl.pixfmt.repr) {\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_PREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__BGRX:\n      wuffs_base__store_u32le__no_bounds_check(row + (4 * ((size_t)x)), color);\n      break;\n\n      // Common formats above. Rarer formats below.\n\n    case WUFFS_BASE__PIXEL_FORMAT__Y:\n      wuffs_base__store_u8__no_bounds_check(\n          row + ((size_t)x), wuffs_base__color_u32_argb_premul__as_gray(color));\n      break;\n\n    case WUFFS_BASE__PIXEL_FORMAT__INDEXED__BGRA_BINARY:\n      wuffs_base__store_u8__no_bounds_check(\n          row + ((size_t)x), wuffs_" +
-	"base__pixel_palette__closest_element(\n                                 wuffs_base__pixel_buffer__palette(pb),\n                                 pb->pixcfg.private_impl.pixfmt, color));\n      break;\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGR_565: {\n      uint32_t b5 = 0x1F & (color >> (8 - 5));\n      uint32_t g6 = 0x3F & (color >> (16 - 6));\n      uint32_t r5 = 0x1F & (color >> (24 - 5));\n      uint32_t bgr565 = (b5 << 0) | (g6 << 5) | (r5 << 11);\n      wuffs_base__store_u16le__no_bounds_check(row + (2 * ((size_t)x)),\n                                               (uint16_t)bgr565);\n      break;\n    }\n    case WUFFS_BASE__PIXEL_FORMAT__BGR:\n      wuffs_base__store_u24le__no_bounds_check(row + (3 * ((size_t)x)), color);\n      break;\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_NONPREMUL:\n      wuffs_base__store_u32le__no_bounds_check(\n          row + (4 * ((size_t)x)), wuffs_base__nonpremul_u32_axxx(color));\n      break;\n\n    case WUFFS_BASE__PIXEL_FORMAT__RGB:\n      wuffs_base__store_u24le__no_bounds_check(\n          r" +
-	"ow + (3 * ((size_t)x)), wuffs_base__swap_u32_argb_abgr(color));\n      break;\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_NONPREMUL:\n      wuffs_base__store_u32le__no_bounds_check(\n          row + (4 * ((size_t)x)), wuffs_base__nonpremul_u32_axxx(\n                                       wuffs_base__swap_u32_argb_abgr(color)));\n      break;\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_PREMUL:\n      WUFFS_BASE__FALLTHROUGH;\n    case WUFFS_BASE__PIXEL_FORMAT__RGBX:\n      wuffs_base__store_u32le__no_bounds_check(\n          row + (4 * ((size_t)x)), wuffs_base__swap_u32_argb_abgr(color));\n      break;\n\n    default:\n      // TODO: support more formats.\n      return wuffs_base__make_status(wuffs_base__error__unsupported_option);\n  }\n\n  return wuffs_base__make_status(NULL);\n}\n\n" +
+	"// ---------------- Images\n\nconst uint32_t wuffs_base__pixel_format__bits_per_channel[16] = {\n    0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,\n    0x08, 0x0A, 0x0C, 0x10, 0x18, 0x20, 0x30, 0x40,\n};\n\n" +
+	"" +
+	"// --------\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__squash_bgr_565_888(wuffs_base__slice_u8 dst,\n                                               wuffs_base__slice_u8 src) {\n  size_t len4 = (dst.len < src.len ? dst.len : src.len) / 4;\n  uint8_t* d = dst.ptr;\n  uint8_t* s = src.ptr;\n\n  size_t n = len4;\n  while (n--) {\n    uint32_t argb = wuffs_base__load_u32le__no_bounds_check(s);\n    uint32_t b5 = 0x1F & (argb >> (8 - 5));\n    uint32_t g6 = 0x3F & (argb >> (16 - 6));\n    uint32_t r5 = 0x1F & (argb >> (24 - 5));\n    wuffs_base__store_u32le__no_bounds_check(\n        d, (r5 << 11) | (g6 << 5) | (b5 << 0));\n    s += 4;\n    d += 4;\n  }\n  return len4 * 4;\n}\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__swap_rgbx_bgrx(wuffs_base__slice_u8 dst,\n                                           wuffs_base__slice_u8 src) {\n  size_t len4 = (dst.len < src.len ? dst.len : src.len) / 4;\n  uint8_t* d = dst.ptr;\n  uint8_t* s = src.ptr;\n\n  size_t n = len4;\n  while (n--) {\n    uint8_t b0 = s[0];\n    uint8_t b1 = s[1];\n    u" +
+	"int8_t b2 = s[2];\n    uint8_t b3 = s[3];\n    d[0] = b2;\n    d[1] = b1;\n    d[2] = b0;\n    d[3] = b3;\n    s += 4;\n    d += 4;\n  }\n  return len4 * 4;\n}\n\nstatic inline uint32_t  //\nwuffs_base__swap_u32_argb_abgr(uint32_t u) {\n  uint32_t o = u & 0xFF00FF00;\n  uint32_t r = u & 0x00FF0000;\n  uint32_t b = u & 0x000000FF;\n  return o | (r >> 16) | (b << 16);\n}\n\n" +
+	"" +
+	"// --------\n\nstatic inline uint32_t  //\nwuffs_base__composite_nonpremul_nonpremul_u32_axxx(uint32_t dst_nonpremul,\n                                                   uint32_t src_nonpremul) {\n  // Convert from 8-bit color to 16-bit color.\n  uint32_t sa = 0x101 * (0xFF & (src_nonpremul >> 24));\n  uint32_t sr = 0x101 * (0xFF & (src_nonpremul >> 16));\n  uint32_t sg = 0x101 * (0xFF & (src_nonpremul >> 8));\n  uint32_t sb = 0x101 * (0xFF & (src_nonpremul >> 0));\n  uint32_t da = 0x101 * (0xFF & (dst_nonpremul >> 24));\n  uint32_t dr = 0x101 * (0xFF & (dst_nonpremul >> 16));\n  uint32_t dg = 0x101 * (0xFF & (dst_nonpremul >> 8));\n  uint32_t db = 0x101 * (0xFF & (dst_nonpremul >> 0));\n\n  // Convert dst from nonpremul to premul.\n  dr = (dr * da) / 0xFFFF;\n  dg = (dg * da) / 0xFFFF;\n  db = (db * da) / 0xFFFF;\n\n  // Calculate the inverse of the src-alpha: how much of the dst to keep.\n  uint32_t ia = 0xFFFF - sa;\n\n  // Composite src (nonpremul) over dst (premul).\n  da = sa + ((da * ia) / 0xFFFF);\n  dr = ((sr * sa) + (dr * i" +
+	"a)) / 0xFFFF;\n  dg = ((sg * sa) + (dg * ia)) / 0xFFFF;\n  db = ((sb * sa) + (db * ia)) / 0xFFFF;\n\n  // Convert dst from premul to nonpremul.\n  if (da != 0) {\n    dr = (dr * 0xFFFF) / da;\n    dg = (dg * 0xFFFF) / da;\n    db = (db * 0xFFFF) / da;\n  }\n\n  // Convert from 16-bit color to 8-bit color and combine the components.\n  da >>= 8;\n  dr >>= 8;\n  dg >>= 8;\n  db >>= 8;\n  return (db << 0) | (dg << 8) | (dr << 16) | (da << 24);\n}\n\nstatic inline uint32_t  //\nwuffs_base__composite_nonpremul_premul_u32_axxx(uint32_t dst_nonpremul,\n                                                uint32_t src_premul) {\n  // Convert from 8-bit color to 16-bit color.\n  uint32_t sa = 0x101 * (0xFF & (src_premul >> 24));\n  uint32_t sr = 0x101 * (0xFF & (src_premul >> 16));\n  uint32_t sg = 0x101 * (0xFF & (src_premul >> 8));\n  uint32_t sb = 0x101 * (0xFF & (src_premul >> 0));\n  uint32_t da = 0x101 * (0xFF & (dst_nonpremul >> 24));\n  uint32_t dr = 0x101 * (0xFF & (dst_nonpremul >> 16));\n  uint32_t dg = 0x101 * (0xFF & (dst_nonpremul >> 8))" +
+	";\n  uint32_t db = 0x101 * (0xFF & (dst_nonpremul >> 0));\n\n  // Convert dst from nonpremul to premul.\n  dr = (dr * da) / 0xFFFF;\n  dg = (dg * da) / 0xFFFF;\n  db = (db * da) / 0xFFFF;\n\n  // Calculate the inverse of the src-alpha: how much of the dst to keep.\n  uint32_t ia = 0xFFFF - sa;\n\n  // Composite src (premul) over dst (premul).\n  da = sa + ((da * ia) / 0xFFFF);\n  dr = sr + ((dr * ia) / 0xFFFF);\n  dg = sg + ((dg * ia) / 0xFFFF);\n  db = sb + ((db * ia) / 0xFFFF);\n\n  // Convert dst from premul to nonpremul.\n  if (da != 0) {\n    dr = (dr * 0xFFFF) / da;\n    dg = (dg * 0xFFFF) / da;\n    db = (db * 0xFFFF) / da;\n  }\n\n  // Convert from 16-bit color to 8-bit color and combine the components.\n  da >>= 8;\n  dr >>= 8;\n  dg >>= 8;\n  db >>= 8;\n  return (db << 0) | (dg << 8) | (dr << 16) | (da << 24);\n}\n\nstatic inline uint32_t  //\nwuffs_base__composite_premul_nonpremul_u32_axxx(uint32_t dst_premul,\n                                                uint32_t src_nonpremul) {\n  // Convert from 8-bit color to 16-bit color.\n " +
+	" uint32_t sa = 0x101 * (0xFF & (src_nonpremul >> 24));\n  uint32_t sr = 0x101 * (0xFF & (src_nonpremul >> 16));\n  uint32_t sg = 0x101 * (0xFF & (src_nonpremul >> 8));\n  uint32_t sb = 0x101 * (0xFF & (src_nonpremul >> 0));\n  uint32_t da = 0x101 * (0xFF & (dst_premul >> 24));\n  uint32_t dr = 0x101 * (0xFF & (dst_premul >> 16));\n  uint32_t dg = 0x101 * (0xFF & (dst_premul >> 8));\n  uint32_t db = 0x101 * (0xFF & (dst_premul >> 0));\n\n  // Calculate the inverse of the src-alpha: how much of the dst to keep.\n  uint32_t ia = 0xFFFF - sa;\n\n  // Composite src (nonpremul) over dst (premul).\n  da = sa + ((da * ia) / 0xFFFF);\n  dr = ((sr * sa) + (dr * ia)) / 0xFFFF;\n  dg = ((sg * sa) + (dg * ia)) / 0xFFFF;\n  db = ((sb * sa) + (db * ia)) / 0xFFFF;\n\n  // Convert from 16-bit color to 8-bit color and combine the components.\n  da >>= 8;\n  dr >>= 8;\n  dg >>= 8;\n  db >>= 8;\n  return (db << 0) | (dg << 8) | (dr << 16) | (da << 24);\n}\n\nstatic inline uint32_t  //\nwuffs_base__composite_premul_premul_u32_axxx(uint32_t dst_premul,\n    " +
+	"                                         uint32_t src_premul) {\n  // Convert from 8-bit color to 16-bit color.\n  uint32_t sa = 0x101 * (0xFF & (src_premul >> 24));\n  uint32_t sr = 0x101 * (0xFF & (src_premul >> 16));\n  uint32_t sg = 0x101 * (0xFF & (src_premul >> 8));\n  uint32_t sb = 0x101 * (0xFF & (src_premul >> 0));\n  uint32_t da = 0x101 * (0xFF & (dst_premul >> 24));\n  uint32_t dr = 0x101 * (0xFF & (dst_premul >> 16));\n  uint32_t dg = 0x101 * (0xFF & (dst_premul >> 8));\n  uint32_t db = 0x101 * (0xFF & (dst_premul >> 0));\n\n  // Calculate the inverse of the src-alpha: how much of the dst to keep.\n  uint32_t ia = 0xFFFF - sa;\n\n  // Composite src (premul) over dst (premul).\n  da = sa + ((da * ia) / 0xFFFF);\n  dr = sr + ((dr * ia) / 0xFFFF);\n  dg = sg + ((dg * ia) / 0xFFFF);\n  db = sb + ((db * ia) / 0xFFFF);\n\n  // Convert from 16-bit color to 8-bit color and combine the components.\n  da >>= 8;\n  dr >>= 8;\n  dg >>= 8;\n  db >>= 8;\n  return (db << 0) | (dg << 8) | (dr << 16) | (da << 24);\n}\n\n" +
+	"" +
+	"// --------\n\nwuffs_base__color_u32_argb_premul  //\nwuffs_base__pixel_buffer__color_u32_at(const wuffs_base__pixel_buffer* pb,\n                                       uint32_t x,\n                                       uint32_t y) {\n  if (!pb || (x >= pb->pixcfg.private_impl.width) ||\n      (y >= pb->pixcfg.private_impl.height)) {\n    return 0;\n  }\n\n  if (wuffs_base__pixel_format__is_planar(&pb->pixcfg.private_impl.pixfmt)) {\n    // TODO: support planar formats.\n    return 0;\n  }\n\n  size_t stride = pb->private_impl.planes[0].stride;\n  uint8_t* row = pb->private_impl.planes[0].ptr + (stride * ((size_t)y));\n\n  switch (pb->pixcfg.private_impl.pixfmt.repr) {\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_PREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_BINARY:\n      return wuffs_base__load_u32le__no_bounds_check(row + (4 * ((size_t)x)));\n\n    case WUFFS_BASE__PIXEL_FORMAT__INDEXED__BGRA_PREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__INDEXED__BGRA_BINARY: {\n      uint8_t* palette = pb->private_impl.planes[3].ptr;\n      return wu" +
+	"ffs_base__load_u32le__no_bounds_check(palette +\n                                                     (4 * ((size_t)row[x])));\n    }\n\n      // Common formats above. Rarer formats below.\n\n    case WUFFS_BASE__PIXEL_FORMAT__Y:\n      return 0xFF000000 | (0x00010101 * ((uint32_t)(row[x])));\n\n    case WUFFS_BASE__PIXEL_FORMAT__INDEXED__BGRA_NONPREMUL: {\n      uint8_t* palette = pb->private_impl.planes[3].ptr;\n      return wuffs_base__premul_u32_axxx(\n          wuffs_base__load_u32le__no_bounds_check(palette +\n                                                  (4 * ((size_t)row[x]))));\n    }\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGR_565:\n      return wuffs_base__color_u16_rgb_565__as__color_u32_argb_premul(\n          wuffs_base__load_u16le__no_bounds_check(row + (2 * ((size_t)x))));\n    case WUFFS_BASE__PIXEL_FORMAT__BGR:\n      return 0xFF000000 |\n             wuffs_base__load_u24le__no_bounds_check(row + (3 * ((size_t)x)));\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_NONPREMUL:\n      return wuffs_base__premul_u32_axxx(\n  " +
+	"        wuffs_base__load_u32le__no_bounds_check(row + (4 * ((size_t)x))));\n    case WUFFS_BASE__PIXEL_FORMAT__BGRX:\n      return 0xFF000000 |\n             wuffs_base__load_u32le__no_bounds_check(row + (4 * ((size_t)x)));\n\n    case WUFFS_BASE__PIXEL_FORMAT__RGB:\n      return wuffs_base__swap_u32_argb_abgr(\n          0xFF000000 |\n          wuffs_base__load_u24le__no_bounds_check(row + (3 * ((size_t)x))));\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_NONPREMUL:\n      return wuffs_base__swap_u32_argb_abgr(wuffs_base__premul_u32_axxx(\n          wuffs_base__load_u32le__no_bounds_check(row + (4 * ((size_t)x)))));\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_PREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_BINARY:\n      return wuffs_base__swap_u32_argb_abgr(\n          wuffs_base__load_u32le__no_bounds_check(row + (4 * ((size_t)x))));\n    case WUFFS_BASE__PIXEL_FORMAT__RGBX:\n      return wuffs_base__swap_u32_argb_abgr(\n          0xFF000000 |\n          wuffs_base__load_u32le__no_bounds_check(row + (4 * ((size_t)x))));\n\n    defaul" +
+	"t:\n      // TODO: support more formats.\n      break;\n  }\n\n  return 0;\n}\n\nwuffs_base__status  //\nwuffs_base__pixel_buffer__set_color_u32_at(\n    wuffs_base__pixel_buffer* pb,\n    uint32_t x,\n    uint32_t y,\n    wuffs_base__color_u32_argb_premul color) {\n  if (!pb) {\n    return wuffs_base__make_status(wuffs_base__error__bad_receiver);\n  }\n  if ((x >= pb->pixcfg.private_impl.width) ||\n      (y >= pb->pixcfg.private_impl.height)) {\n    return wuffs_base__make_status(wuffs_base__error__bad_argument);\n  }\n\n  if (wuffs_base__pixel_format__is_planar(&pb->pixcfg.private_impl.pixfmt)) {\n    // TODO: support planar formats.\n    return wuffs_base__make_status(wuffs_base__error__unsupported_option);\n  }\n\n  size_t stride = pb->private_impl.planes[0].stride;\n  uint8_t* row = pb->private_impl.planes[0].ptr + (stride * ((size_t)y));\n\n  switch (pb->pixcfg.private_impl.pixfmt.repr) {\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_PREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__BGRX:\n      wuffs_base__store_u32le__no_bounds_check(row + (4 *" +
+	" ((size_t)x)), color);\n      break;\n\n      // Common formats above. Rarer formats below.\n\n    case WUFFS_BASE__PIXEL_FORMAT__Y:\n      wuffs_base__store_u8__no_bounds_check(\n          row + ((size_t)x), wuffs_base__color_u32_argb_premul__as_gray(color));\n      break;\n\n    case WUFFS_BASE__PIXEL_FORMAT__INDEXED__BGRA_BINARY:\n      wuffs_base__store_u8__no_bounds_check(\n          row + ((size_t)x), wuffs_base__pixel_palette__closest_element(\n                                 wuffs_base__pixel_buffer__palette(pb),\n                                 pb->pixcfg.private_impl.pixfmt, color));\n      break;\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGR_565:\n      wuffs_base__store_u16le__no_bounds_check(\n          row + (2 * ((size_t)x)),\n          wuffs_base__color_u32_argb_premul__as__color_u16_rgb_565(color));\n      break;\n    case WUFFS_BASE__PIXEL_FORMAT__BGR:\n      wuffs_base__store_u24le__no_bounds_check(row + (3 * ((size_t)x)), color);\n      break;\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_NONPREMUL:\n      wuffs_base__sto" +
+	"re_u32le__no_bounds_check(\n          row + (4 * ((size_t)x)), wuffs_base__nonpremul_u32_axxx(color));\n      break;\n\n    case WUFFS_BASE__PIXEL_FORMAT__RGB:\n      wuffs_base__store_u24le__no_bounds_check(\n          row + (3 * ((size_t)x)), wuffs_base__swap_u32_argb_abgr(color));\n      break;\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_NONPREMUL:\n      wuffs_base__store_u32le__no_bounds_check(\n          row + (4 * ((size_t)x)), wuffs_base__nonpremul_u32_axxx(\n                                       wuffs_base__swap_u32_argb_abgr(color)));\n      break;\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_PREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBX:\n      wuffs_base__store_u32le__no_bounds_check(\n          row + (4 * ((size_t)x)), wuffs_base__swap_u32_argb_abgr(color));\n      break;\n\n    default:\n      // TODO: support more formats.\n      return wuffs_base__make_status(wuffs_base__error__unsupported_option);\n  }\n\n  return wuffs_base__make_status(NULL);\n}\n\n" +
 	"" +
 	"// --------\n\nuint8_t  //\nwuffs_base__pixel_palette__closest_element(\n    wuffs_base__slice_u8 palette_slice,\n    wuffs_base__pixel_format palette_format,\n    wuffs_base__color_u32_argb_premul c) {\n  size_t n = palette_slice.len / 4;\n  if (n > 256) {\n    n = 256;\n  }\n  size_t best_index = 0;\n  uint64_t best_score = 0xFFFFFFFFFFFFFFFF;\n\n  // Work in 16-bit color.\n  uint32_t ca = 0x101 * (0xFF & (c >> 24));\n  uint32_t cr = 0x101 * (0xFF & (c >> 16));\n  uint32_t cg = 0x101 * (0xFF & (c >> 8));\n  uint32_t cb = 0x101 * (0xFF & (c >> 0));\n\n  switch (palette_format.repr) {\n    case WUFFS_BASE__PIXEL_FORMAT__INDEXED__BGRA_NONPREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__INDEXED__BGRA_PREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__INDEXED__BGRA_BINARY: {\n      bool nonpremul = palette_format.repr ==\n                       WUFFS_BASE__PIXEL_FORMAT__INDEXED__BGRA_NONPREMUL;\n\n      size_t i;\n      for (i = 0; i < n; i++) {\n        // Work in 16-bit color.\n        uint32_t pb = 0x101 * ((uint32_t)(palette_slice.ptr[(4 * i) + 0]))" +
-	";\n        uint32_t pg = 0x101 * ((uint32_t)(palette_slice.ptr[(4 * i) + 1]));\n        uint32_t pr = 0x101 * ((uint32_t)(palette_slice.ptr[(4 * i) + 2]));\n        uint32_t pa = 0x101 * ((uint32_t)(palette_slice.ptr[(4 * i) + 3]));\n\n        // Convert to premultiplied alpha.\n        if (nonpremul && (pa != 0xFFFF)) {\n          pb = (pb * pa) / 0xFFFF;\n          pg = (pg * pa) / 0xFFFF;\n          pr = (pr * pa) / 0xFFFF;\n        }\n\n        // These deltas are conceptually int32_t (signed) but after squaring,\n        // it's equivalent to work in uint32_t (unsigned).\n        pb -= cb;\n        pg -= cg;\n        pr -= cr;\n        pa -= ca;\n        uint64_t score = ((uint64_t)(pb * pb)) + ((uint64_t)(pg * pg)) +\n                         ((uint64_t)(pr * pr)) + ((uint64_t)(pa * pa));\n        if (best_score > score) {\n          best_score = score;\n          best_index = i;\n        }\n      }\n      break;\n    }\n  }\n\n  return best_index;\n}\n\n" +
+	";\n        uint32_t pg = 0x101 * ((uint32_t)(palette_slice.ptr[(4 * i) + 1]));\n        uint32_t pr = 0x101 * ((uint32_t)(palette_slice.ptr[(4 * i) + 2]));\n        uint32_t pa = 0x101 * ((uint32_t)(palette_slice.ptr[(4 * i) + 3]));\n\n        // Convert to premultiplied alpha.\n        if (nonpremul && (pa != 0xFFFF)) {\n          pb = (pb * pa) / 0xFFFF;\n          pg = (pg * pa) / 0xFFFF;\n          pr = (pr * pa) / 0xFFFF;\n        }\n\n        // These deltas are conceptually int32_t (signed) but after squaring,\n        // it's equivalent to work in uint32_t (unsigned).\n        pb -= cb;\n        pg -= cg;\n        pr -= cr;\n        pa -= ca;\n        uint64_t score = ((uint64_t)(pb * pb)) + ((uint64_t)(pg * pg)) +\n                         ((uint64_t)(pr * pr)) + ((uint64_t)(pa * pa));\n        if (best_score > score) {\n          best_score = score;\n          best_index = i;\n        }\n      }\n      break;\n    }\n  }\n\n  return (uint8_t)best_index;\n}\n\n" +
 	"" +
-	"// --------\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__bgra_nonpremul__bgra_nonpremul__src_over(\n    wuffs_base__slice_u8 dst,\n    wuffs_base__slice_u8 dst_palette,\n    wuffs_base__slice_u8 src) {\n  size_t dst_len4 = dst.len / 4;\n  size_t src_len4 = src.len / 4;\n  size_t len = dst_len4 < src_len4 ? dst_len4 : src_len4;\n  uint8_t* d = dst.ptr;\n  uint8_t* s = src.ptr;\n  size_t n = len;\n\n  // TODO: unroll.\n\n  while (n >= 1) {\n    uint32_t d0 = wuffs_base__load_u32le__no_bounds_check(d + (0 * 4));\n    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(s + (0 * 4));\n    wuffs_base__store_u32le__no_bounds_check(\n        d + (0 * 4),\n        wuffs_base__composite_nonpremul_nonpremul_u32_axxx(d0, s0));\n\n    s += 1 * 4;\n    d += 1 * 4;\n    n -= 1;\n  }\n\n  return len;\n}\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__bgra_premul__bgra_nonpremul__src(\n    wuffs_base__slice_u8 dst,\n    wuffs_base__slice_u8 dst_palette,\n    wuffs_base__slice_u8 src) {\n  size_t dst_len4 = dst.len / 4;\n  size_t src_len4 = src.len /" +
-	" 4;\n  size_t len = dst_len4 < src_len4 ? dst_len4 : src_len4;\n  uint8_t* d = dst.ptr;\n  uint8_t* s = src.ptr;\n  size_t n = len;\n\n  // TODO: unroll.\n\n  while (n >= 1) {\n    uint32_t s0 = wuffs_base__premul_u32_axxx(\n        wuffs_base__load_u32le__no_bounds_check(s + (0 * 4)));\n    wuffs_base__store_u32le__no_bounds_check(d + (0 * 4), s0);\n\n    s += 1 * 4;\n    d += 1 * 4;\n    n -= 1;\n  }\n\n  return len;\n}\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__bgra_premul__bgra_nonpremul__src_over(\n    wuffs_base__slice_u8 dst,\n    wuffs_base__slice_u8 dst_palette,\n    wuffs_base__slice_u8 src) {\n  size_t dst_len4 = dst.len / 4;\n  size_t src_len4 = src.len / 4;\n  size_t len = dst_len4 < src_len4 ? dst_len4 : src_len4;\n  uint8_t* d = dst.ptr;\n  uint8_t* s = src.ptr;\n  size_t n = len;\n\n  // TODO: unroll.\n\n  while (n >= 1) {\n    uint32_t d0 = wuffs_base__load_u32le__no_bounds_check(d + (0 * 4));\n    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(s + (0 * 4));\n    wuffs_base__store_u32le__no_bounds_check(\n        d" +
-	" + (0 * 4), wuffs_base__composite_premul_nonpremul_u32_axxx(d0, s0));\n\n    s += 1 * 4;\n    d += 1 * 4;\n    n -= 1;\n  }\n\n  return len;\n}\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__copy_1_1(wuffs_base__slice_u8 dst,\n                                     wuffs_base__slice_u8 dst_palette,\n                                     wuffs_base__slice_u8 src) {\n  return wuffs_base__slice_u8__copy_from_slice(dst, src);\n}\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__copy_4_4(wuffs_base__slice_u8 dst,\n                                     wuffs_base__slice_u8 dst_palette,\n                                     wuffs_base__slice_u8 src) {\n  size_t dst_len4 = dst.len / 4;\n  size_t src_len4 = src.len / 4;\n  size_t len = dst_len4 < src_len4 ? dst_len4 : src_len4;\n  if (len > 0) {\n    memmove(dst.ptr, src.ptr, len * 4);\n  }\n  return len;\n}\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__xx__index__src(wuffs_base__slice_u8 dst,\n                                           wuffs_base__slice_u8 dst_palette,\n                    " +
-	"                       wuffs_base__slice_u8 src) {\n  if (dst_palette.len != 1024) {\n    return 0;\n  }\n  size_t dst_len2 = dst.len / 2;\n  size_t len = dst_len2 < src.len ? dst_len2 : src.len;\n  uint8_t* d = dst.ptr;\n  uint8_t* s = src.ptr;\n  size_t n = len;\n\n  const size_t loop_unroll_count = 4;\n\n  while (n >= loop_unroll_count) {\n    wuffs_base__store_u16le__no_bounds_check(\n        d + (0 * 2), wuffs_base__load_u16le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[0] * 4)));\n    wuffs_base__store_u16le__no_bounds_check(\n        d + (1 * 2), wuffs_base__load_u16le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[1] * 4)));\n    wuffs_base__store_u16le__no_bounds_check(\n        d + (2 * 2), wuffs_base__load_u16le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[2] * 4)));\n    wuffs_base__store_u16le__no_bounds_check(\n        d + (3 * 2), wuffs_base__load_u16le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[3] * 4)));\n" +
-	"\n    s += loop_unroll_count * 1;\n    d += loop_unroll_count * 2;\n    n -= loop_unroll_count;\n  }\n\n  while (n >= 1) {\n    wuffs_base__store_u16le__no_bounds_check(\n        d + (0 * 2), wuffs_base__load_u16le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[0] * 4)));\n\n    s += 1 * 1;\n    d += 1 * 2;\n    n -= 1;\n  }\n\n  return len;\n}\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__xxx__index__src(wuffs_base__slice_u8 dst,\n                                            wuffs_base__slice_u8 dst_palette,\n                                            wuffs_base__slice_u8 src) {\n  if (dst_palette.len != 1024) {\n    return 0;\n  }\n  size_t dst_len3 = dst.len / 3;\n  size_t len = dst_len3 < src.len ? dst_len3 : src.len;\n  uint8_t* d = dst.ptr;\n  uint8_t* s = src.ptr;\n  size_t n = len;\n\n  const size_t loop_unroll_count = 4;\n\n  // The comparison in the while condition is \">\", not \">=\", because with \">=\",\n  // the last 4-byte store could write past the end of the dst slice.\n  //\n  // Each 4-byte store wri" +
-	"tes one too many bytes, but a subsequent store will\n  // overwrite that with the correct byte. There is always another store,\n  // whether a 4-byte store in this loop or a 1-byte store in the next loop.\n  while (n > loop_unroll_count) {\n    wuffs_base__store_u32le__no_bounds_check(\n        d + (0 * 3), wuffs_base__load_u32le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[0] * 4)));\n    wuffs_base__store_u32le__no_bounds_check(\n        d + (1 * 3), wuffs_base__load_u32le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[1] * 4)));\n    wuffs_base__store_u32le__no_bounds_check(\n        d + (2 * 3), wuffs_base__load_u32le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[2] * 4)));\n    wuffs_base__store_u32le__no_bounds_check(\n        d + (3 * 3), wuffs_base__load_u32le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[3] * 4)));\n\n    s += loop_unroll_count * 1;\n    d += loop_unroll_count * 3;\n    n -= loop_unroll_count;\n  " +
-	"}\n\n  while (n >= 1) {\n    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(dst_palette.ptr +\n                                                          ((size_t)s[0] * 4));\n    d[0] = (uint8_t)(s0 >> 0);\n    d[1] = (uint8_t)(s0 >> 8);\n    d[2] = (uint8_t)(s0 >> 16);\n\n    s += 1 * 1;\n    d += 1 * 3;\n    n -= 1;\n  }\n\n  return len;\n}\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__xxx__index_binary_alpha__src_over(\n    wuffs_base__slice_u8 dst,\n    wuffs_base__slice_u8 dst_palette,\n    wuffs_base__slice_u8 src) {\n  if (dst_palette.len != 1024) {\n    return 0;\n  }\n  size_t dst_len3 = dst.len / 3;\n  size_t len = dst_len3 < src.len ? dst_len3 : src.len;\n  uint8_t* d = dst.ptr;\n  uint8_t* s = src.ptr;\n  size_t n = len;\n\n  const size_t loop_unroll_count = 4;\n\n  while (n >= loop_unroll_count) {\n    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(dst_palette.ptr +\n                                                          ((size_t)s[0] * 4));\n    if (s0) {\n      wuffs_base__store_u24le__no_bounds_check(d + (0" +
-	" * 4), s0);\n    }\n    uint32_t s1 = wuffs_base__load_u32le__no_bounds_check(dst_palette.ptr +\n                                                          ((size_t)s[1] * 4));\n    if (s1) {\n      wuffs_base__store_u24le__no_bounds_check(d + (1 * 4), s1);\n    }\n    uint32_t s2 = wuffs_base__load_u32le__no_bounds_check(dst_palette.ptr +\n                                                          ((size_t)s[2] * 4));\n    if (s2) {\n      wuffs_base__store_u24le__no_bounds_check(d + (2 * 4), s2);\n    }\n    uint32_t s3 = wuffs_base__load_u32le__no_bounds_check(dst_palette.ptr +\n                                                          ((size_t)s[3] * 4));\n    if (s3) {\n      wuffs_base__store_u24le__no_bounds_check(d + (3 * 4), s3);\n    }\n\n    s += loop_unroll_count * 1;\n    d += loop_unroll_count * 3;\n    n -= loop_unroll_count;\n  }\n\n  while (n >= 1) {\n    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(dst_palette.ptr +\n                                                          ((size_t)s[0] * 4));\n    if (s0) {\n" +
-	"      wuffs_base__store_u24le__no_bounds_check(d + (0 * 4), s0);\n    }\n\n    s += 1 * 1;\n    d += 1 * 3;\n    n -= 1;\n  }\n\n  return len;\n}\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__xxxx__index__src(wuffs_base__slice_u8 dst,\n                                             wuffs_base__slice_u8 dst_palette,\n                                             wuffs_base__slice_u8 src) {\n  if (dst_palette.len != 1024) {\n    return 0;\n  }\n  size_t dst_len4 = dst.len / 4;\n  size_t len = dst_len4 < src.len ? dst_len4 : src.len;\n  uint8_t* d = dst.ptr;\n  uint8_t* s = src.ptr;\n  size_t n = len;\n\n  const size_t loop_unroll_count = 4;\n\n  while (n >= loop_unroll_count) {\n    wuffs_base__store_u32le__no_bounds_check(\n        d + (0 * 4), wuffs_base__load_u32le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[0] * 4)));\n    wuffs_base__store_u32le__no_bounds_check(\n        d + (1 * 4), wuffs_base__load_u32le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[1] * 4)));\n    wuffs_base__" +
-	"store_u32le__no_bounds_check(\n        d + (2 * 4), wuffs_base__load_u32le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[2] * 4)));\n    wuffs_base__store_u32le__no_bounds_check(\n        d + (3 * 4), wuffs_base__load_u32le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[3] * 4)));\n\n    s += loop_unroll_count * 1;\n    d += loop_unroll_count * 4;\n    n -= loop_unroll_count;\n  }\n\n  while (n >= 1) {\n    wuffs_base__store_u32le__no_bounds_check(\n        d + (0 * 4), wuffs_base__load_u32le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[0] * 4)));\n\n    s += 1 * 1;\n    d += 1 * 4;\n    n -= 1;\n  }\n\n  return len;\n}\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__xxxx__index_binary_alpha__src_over(\n    wuffs_base__slice_u8 dst,\n    wuffs_base__slice_u8 dst_palette,\n    wuffs_base__slice_u8 src) {\n  if (dst_palette.len != 1024) {\n    return 0;\n  }\n  size_t dst_len4 = dst.len / 4;\n  size_t len = dst_len4 < src.len ? dst_len4 : src.len;\n  uint8_t* d " +
-	"= dst.ptr;\n  uint8_t* s = src.ptr;\n  size_t n = len;\n\n  const size_t loop_unroll_count = 4;\n\n  while (n >= loop_unroll_count) {\n    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(dst_palette.ptr +\n                                                          ((size_t)s[0] * 4));\n    if (s0) {\n      wuffs_base__store_u32le__no_bounds_check(d + (0 * 4), s0);\n    }\n    uint32_t s1 = wuffs_base__load_u32le__no_bounds_check(dst_palette.ptr +\n                                                          ((size_t)s[1] * 4));\n    if (s1) {\n      wuffs_base__store_u32le__no_bounds_check(d + (1 * 4), s1);\n    }\n    uint32_t s2 = wuffs_base__load_u32le__no_bounds_check(dst_palette.ptr +\n                                                          ((size_t)s[2] * 4));\n    if (s2) {\n      wuffs_base__store_u32le__no_bounds_check(d + (2 * 4), s2);\n    }\n    uint32_t s3 = wuffs_base__load_u32le__no_bounds_check(dst_palette.ptr +\n                                                          ((size_t)s[3] * 4));\n    if (s3) {\n      w" +
-	"uffs_base__store_u32le__no_bounds_check(d + (3 * 4), s3);\n    }\n\n    s += loop_unroll_count * 1;\n    d += loop_unroll_count * 4;\n    n -= loop_unroll_count;\n  }\n\n  while (n >= 1) {\n    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(dst_palette.ptr +\n                                                          ((size_t)s[0] * 4));\n    if (s0) {\n      wuffs_base__store_u32le__no_bounds_check(d + (0 * 4), s0);\n    }\n\n    s += 1 * 1;\n    d += 1 * 4;\n    n -= 1;\n  }\n\n  return len;\n}\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__xxxx__xxx(wuffs_base__slice_u8 dst,\n                                      wuffs_base__slice_u8 dst_palette,\n                                      wuffs_base__slice_u8 src) {\n  size_t dst_len4 = dst.len / 4;\n  size_t src_len3 = src.len / 3;\n  size_t len = dst_len4 < src_len3 ? dst_len4 : src_len3;\n  uint8_t* d = dst.ptr;\n  uint8_t* s = src.ptr;\n  size_t n = len;\n\n  // TODO: unroll.\n\n  while (n >= 1) {\n    wuffs_base__store_u32le__no_bounds_check(\n        d + (0 * 4),\n        0xFF00000" +
-	"0 | wuffs_base__load_u24le__no_bounds_check(s + (0 * 3)));\n\n    s += 1 * 3;\n    d += 1 * 4;\n    n -= 1;\n  }\n\n  return len;\n}\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__xxxx__y(wuffs_base__slice_u8 dst,\n                                    wuffs_base__slice_u8 dst_palette,\n                                    wuffs_base__slice_u8 src) {\n  size_t dst_len4 = dst.len / 4;\n  size_t len = dst_len4 < src.len ? dst_len4 : src.len;\n  uint8_t* d = dst.ptr;\n  uint8_t* s = src.ptr;\n  size_t n = len;\n\n  // TODO: unroll.\n\n  while (n >= 1) {\n    wuffs_base__store_u32le__no_bounds_check(\n        d + (0 * 4), 0xFF000000 | (0x010101 * (uint32_t)s[0]));\n\n    s += 1 * 1;\n    d += 1 * 4;\n    n -= 1;\n  }\n\n  return len;\n}\n\n" +
+	"// --------\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__copy_1_1(wuffs_base__slice_u8 dst,\n                                     wuffs_base__slice_u8 dst_palette,\n                                     wuffs_base__slice_u8 src) {\n  return wuffs_base__slice_u8__copy_from_slice(dst, src);\n}\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__copy_4_4(wuffs_base__slice_u8 dst,\n                                     wuffs_base__slice_u8 dst_palette,\n                                     wuffs_base__slice_u8 src) {\n  size_t dst_len4 = dst.len / 4;\n  size_t src_len4 = src.len / 4;\n  size_t len = dst_len4 < src_len4 ? dst_len4 : src_len4;\n  if (len > 0) {\n    memmove(dst.ptr, src.ptr, len * 4);\n  }\n  return len;\n}\n\n" +
 	"" +
-	"// --------\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__squash_bgr_565_888(wuffs_base__slice_u8 dst,\n                                               wuffs_base__slice_u8 src) {\n  size_t len4 = (dst.len < src.len ? dst.len : src.len) / 4;\n  uint8_t* d = dst.ptr;\n  uint8_t* s = src.ptr;\n\n  size_t n = len4;\n  while (n--) {\n    uint32_t argb = wuffs_base__load_u32le__no_bounds_check(s);\n    uint32_t b5 = 0x1F & (argb >> (8 - 5));\n    uint32_t g6 = 0x3F & (argb >> (16 - 6));\n    uint32_t r5 = 0x1F & (argb >> (24 - 5));\n    wuffs_base__store_u32le__no_bounds_check(\n        d, (b5 << 0) | (g6 << 5) | (r5 << 11));\n    s += 4;\n    d += 4;\n  }\n  return len4 * 4;\n}\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__swap_rgbx_bgrx(wuffs_base__slice_u8 dst,\n                                           wuffs_base__slice_u8 src) {\n  size_t len4 = (dst.len < src.len ? dst.len : src.len) / 4;\n  uint8_t* d = dst.ptr;\n  uint8_t* s = src.ptr;\n\n  size_t n = len4;\n  while (n--) {\n    uint8_t b0 = s[0];\n    uint8_t b1 = s[1];\n    u" +
-	"int8_t b2 = s[2];\n    uint8_t b3 = s[3];\n    d[0] = b2;\n    d[1] = b1;\n    d[2] = b0;\n    d[3] = b3;\n    s += 4;\n    d += 4;\n  }\n  return len4 * 4;\n}\n\n" +
+	"// --------\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__bgr_565__index__src(\n    wuffs_base__slice_u8 dst,\n    wuffs_base__slice_u8 dst_palette,\n    wuffs_base__slice_u8 src) {\n  if (dst_palette.len != 1024) {\n    return 0;\n  }\n  size_t dst_len2 = dst.len / 2;\n  size_t len = dst_len2 < src.len ? dst_len2 : src.len;\n  uint8_t* d = dst.ptr;\n  uint8_t* s = src.ptr;\n  size_t n = len;\n\n  const size_t loop_unroll_count = 4;\n\n  while (n >= loop_unroll_count) {\n    wuffs_base__store_u16le__no_bounds_check(\n        d + (0 * 2), wuffs_base__load_u16le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[0] * 4)));\n    wuffs_base__store_u16le__no_bounds_check(\n        d + (1 * 2), wuffs_base__load_u16le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[1] * 4)));\n    wuffs_base__store_u16le__no_bounds_check(\n        d + (2 * 2), wuffs_base__load_u16le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[2] * 4)));\n    wuffs_base__store_u16le__no_bounds_che" +
+	"ck(\n        d + (3 * 2), wuffs_base__load_u16le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[3] * 4)));\n\n    s += loop_unroll_count * 1;\n    d += loop_unroll_count * 2;\n    n -= loop_unroll_count;\n  }\n\n  while (n >= 1) {\n    wuffs_base__store_u16le__no_bounds_check(\n        d + (0 * 2), wuffs_base__load_u16le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[0] * 4)));\n\n    s += 1 * 1;\n    d += 1 * 2;\n    n -= 1;\n  }\n\n  return len;\n}\n\n" +
+	"" +
+	"// --------\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__bgra_nonpremul__bgra_nonpremul__src_over(\n    wuffs_base__slice_u8 dst,\n    wuffs_base__slice_u8 dst_palette,\n    wuffs_base__slice_u8 src) {\n  size_t dst_len4 = dst.len / 4;\n  size_t src_len4 = src.len / 4;\n  size_t len = dst_len4 < src_len4 ? dst_len4 : src_len4;\n  uint8_t* d = dst.ptr;\n  uint8_t* s = src.ptr;\n  size_t n = len;\n\n  // TODO: unroll.\n\n  while (n >= 1) {\n    uint32_t d0 = wuffs_base__load_u32le__no_bounds_check(d + (0 * 4));\n    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(s + (0 * 4));\n    wuffs_base__store_u32le__no_bounds_check(\n        d + (0 * 4),\n        wuffs_base__composite_nonpremul_nonpremul_u32_axxx(d0, s0));\n\n    s += 1 * 4;\n    d += 1 * 4;\n    n -= 1;\n  }\n\n  return len;\n}\n\n" +
+	"" +
+	"// --------\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__bgra_premul__bgra_nonpremul__src(\n    wuffs_base__slice_u8 dst,\n    wuffs_base__slice_u8 dst_palette,\n    wuffs_base__slice_u8 src) {\n  size_t dst_len4 = dst.len / 4;\n  size_t src_len4 = src.len / 4;\n  size_t len = dst_len4 < src_len4 ? dst_len4 : src_len4;\n  uint8_t* d = dst.ptr;\n  uint8_t* s = src.ptr;\n  size_t n = len;\n\n  // TODO: unroll.\n\n  while (n >= 1) {\n    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(s + (0 * 4));\n    wuffs_base__store_u32le__no_bounds_check(d + (0 * 4),\n                                             wuffs_base__premul_u32_axxx(s0));\n\n    s += 1 * 4;\n    d += 1 * 4;\n    n -= 1;\n  }\n\n  return len;\n}\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__bgra_premul__bgra_nonpremul__src_over(\n    wuffs_base__slice_u8 dst,\n    wuffs_base__slice_u8 dst_palette,\n    wuffs_base__slice_u8 src) {\n  size_t dst_len4 = dst.len / 4;\n  size_t src_len4 = src.len / 4;\n  size_t len = dst_len4 < src_len4 ? dst_len4 : src_len4;\n  uint8_t* d" +
+	" = dst.ptr;\n  uint8_t* s = src.ptr;\n  size_t n = len;\n\n  // TODO: unroll.\n\n  while (n >= 1) {\n    uint32_t d0 = wuffs_base__load_u32le__no_bounds_check(d + (0 * 4));\n    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(s + (0 * 4));\n    wuffs_base__store_u32le__no_bounds_check(\n        d + (0 * 4), wuffs_base__composite_premul_nonpremul_u32_axxx(d0, s0));\n\n    s += 1 * 4;\n    d += 1 * 4;\n    n -= 1;\n  }\n\n  return len;\n}\n\n" +
+	"" +
+	"// --------\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__xxx__index__src(wuffs_base__slice_u8 dst,\n                                            wuffs_base__slice_u8 dst_palette,\n                                            wuffs_base__slice_u8 src) {\n  if (dst_palette.len != 1024) {\n    return 0;\n  }\n  size_t dst_len3 = dst.len / 3;\n  size_t len = dst_len3 < src.len ? dst_len3 : src.len;\n  uint8_t* d = dst.ptr;\n  uint8_t* s = src.ptr;\n  size_t n = len;\n\n  const size_t loop_unroll_count = 4;\n\n  // The comparison in the while condition is \">\", not \">=\", because with\n  // \">=\", the last 4-byte store could write past the end of the dst slice.\n  //\n  // Each 4-byte store writes one too many bytes, but a subsequent store\n  // will overwrite that with the correct byte. There is always another\n  // store, whether a 4-byte store in this loop or a 1-byte store in the\n  // next loop.\n  while (n > loop_unroll_count) {\n    wuffs_base__store_u32le__no_bounds_check(\n        d + (0 * 3), wuffs_base__load_u32le__no_bounds_c" +
+	"heck(\n                         dst_palette.ptr + ((size_t)s[0] * 4)));\n    wuffs_base__store_u32le__no_bounds_check(\n        d + (1 * 3), wuffs_base__load_u32le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[1] * 4)));\n    wuffs_base__store_u32le__no_bounds_check(\n        d + (2 * 3), wuffs_base__load_u32le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[2] * 4)));\n    wuffs_base__store_u32le__no_bounds_check(\n        d + (3 * 3), wuffs_base__load_u32le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[3] * 4)));\n\n    s += loop_unroll_count * 1;\n    d += loop_unroll_count * 3;\n    n -= loop_unroll_count;\n  }\n\n  while (n >= 1) {\n    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(dst_palette.ptr +\n                                                          ((size_t)s[0] * 4));\n    wuffs_base__store_u24le__no_bounds_check(d + (0 * 3), s0);\n\n    s += 1 * 1;\n    d += 1 * 3;\n    n -= 1;\n  }\n\n  return len;\n}\n\nstatic uint64_t  //\nwuffs_base__" +
+	"pixel_swizzler__xxx__index_binary_alpha__src_over(\n    wuffs_base__slice_u8 dst,\n    wuffs_base__slice_u8 dst_palette,\n    wuffs_base__slice_u8 src) {\n  if (dst_palette.len != 1024) {\n    return 0;\n  }\n  size_t dst_len3 = dst.len / 3;\n  size_t len = dst_len3 < src.len ? dst_len3 : src.len;\n  uint8_t* d = dst.ptr;\n  uint8_t* s = src.ptr;\n  size_t n = len;\n\n  const size_t loop_unroll_count = 4;\n\n  while (n >= loop_unroll_count) {\n    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(dst_palette.ptr +\n                                                          ((size_t)s[0] * 4));\n    if (s0) {\n      wuffs_base__store_u24le__no_bounds_check(d + (0 * 4), s0);\n    }\n    uint32_t s1 = wuffs_base__load_u32le__no_bounds_check(dst_palette.ptr +\n                                                          ((size_t)s[1] * 4));\n    if (s1) {\n      wuffs_base__store_u24le__no_bounds_check(d + (1 * 4), s1);\n    }\n    uint32_t s2 = wuffs_base__load_u32le__no_bounds_check(dst_palette.ptr +\n                                    " +
+	"                      ((size_t)s[2] * 4));\n    if (s2) {\n      wuffs_base__store_u24le__no_bounds_check(d + (2 * 4), s2);\n    }\n    uint32_t s3 = wuffs_base__load_u32le__no_bounds_check(dst_palette.ptr +\n                                                          ((size_t)s[3] * 4));\n    if (s3) {\n      wuffs_base__store_u24le__no_bounds_check(d + (3 * 4), s3);\n    }\n\n    s += loop_unroll_count * 1;\n    d += loop_unroll_count * 3;\n    n -= loop_unroll_count;\n  }\n\n  while (n >= 1) {\n    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(dst_palette.ptr +\n                                                          ((size_t)s[0] * 4));\n    if (s0) {\n      wuffs_base__store_u24le__no_bounds_check(d + (0 * 4), s0);\n    }\n\n    s += 1 * 1;\n    d += 1 * 3;\n    n -= 1;\n  }\n\n  return len;\n}\n\n" +
+	"" +
+	"// --------\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__xxxx__index__src(wuffs_base__slice_u8 dst,\n                                             wuffs_base__slice_u8 dst_palette,\n                                             wuffs_base__slice_u8 src) {\n  if (dst_palette.len != 1024) {\n    return 0;\n  }\n  size_t dst_len4 = dst.len / 4;\n  size_t len = dst_len4 < src.len ? dst_len4 : src.len;\n  uint8_t* d = dst.ptr;\n  uint8_t* s = src.ptr;\n  size_t n = len;\n\n  const size_t loop_unroll_count = 4;\n\n  while (n >= loop_unroll_count) {\n    wuffs_base__store_u32le__no_bounds_check(\n        d + (0 * 4), wuffs_base__load_u32le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[0] * 4)));\n    wuffs_base__store_u32le__no_bounds_check(\n        d + (1 * 4), wuffs_base__load_u32le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[1] * 4)));\n    wuffs_base__store_u32le__no_bounds_check(\n        d + (2 * 4), wuffs_base__load_u32le__no_bounds_check(\n                         dst_pale" +
+	"tte.ptr + ((size_t)s[2] * 4)));\n    wuffs_base__store_u32le__no_bounds_check(\n        d + (3 * 4), wuffs_base__load_u32le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[3] * 4)));\n\n    s += loop_unroll_count * 1;\n    d += loop_unroll_count * 4;\n    n -= loop_unroll_count;\n  }\n\n  while (n >= 1) {\n    wuffs_base__store_u32le__no_bounds_check(\n        d + (0 * 4), wuffs_base__load_u32le__no_bounds_check(\n                         dst_palette.ptr + ((size_t)s[0] * 4)));\n\n    s += 1 * 1;\n    d += 1 * 4;\n    n -= 1;\n  }\n\n  return len;\n}\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__xxxx__index_binary_alpha__src_over(\n    wuffs_base__slice_u8 dst,\n    wuffs_base__slice_u8 dst_palette,\n    wuffs_base__slice_u8 src) {\n  if (dst_palette.len != 1024) {\n    return 0;\n  }\n  size_t dst_len4 = dst.len / 4;\n  size_t len = dst_len4 < src.len ? dst_len4 : src.len;\n  uint8_t* d = dst.ptr;\n  uint8_t* s = src.ptr;\n  size_t n = len;\n\n  const size_t loop_unroll_count = 4;\n\n  while (n >= loop_unroll_count)" +
+	" {\n    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(dst_palette.ptr +\n                                                          ((size_t)s[0] * 4));\n    if (s0) {\n      wuffs_base__store_u32le__no_bounds_check(d + (0 * 4), s0);\n    }\n    uint32_t s1 = wuffs_base__load_u32le__no_bounds_check(dst_palette.ptr +\n                                                          ((size_t)s[1] * 4));\n    if (s1) {\n      wuffs_base__store_u32le__no_bounds_check(d + (1 * 4), s1);\n    }\n    uint32_t s2 = wuffs_base__load_u32le__no_bounds_check(dst_palette.ptr +\n                                                          ((size_t)s[2] * 4));\n    if (s2) {\n      wuffs_base__store_u32le__no_bounds_check(d + (2 * 4), s2);\n    }\n    uint32_t s3 = wuffs_base__load_u32le__no_bounds_check(dst_palette.ptr +\n                                                          ((size_t)s[3] * 4));\n    if (s3) {\n      wuffs_base__store_u32le__no_bounds_check(d + (3 * 4), s3);\n    }\n\n    s += loop_unroll_count * 1;\n    d += loop_unroll_count *" +
+	" 4;\n    n -= loop_unroll_count;\n  }\n\n  while (n >= 1) {\n    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(dst_palette.ptr +\n                                                          ((size_t)s[0] * 4));\n    if (s0) {\n      wuffs_base__store_u32le__no_bounds_check(d + (0 * 4), s0);\n    }\n\n    s += 1 * 1;\n    d += 1 * 4;\n    n -= 1;\n  }\n\n  return len;\n}\n\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__xxxx__xxx(wuffs_base__slice_u8 dst,\n                                      wuffs_base__slice_u8 dst_palette,\n                                      wuffs_base__slice_u8 src) {\n  size_t dst_len4 = dst.len / 4;\n  size_t src_len3 = src.len / 3;\n  size_t len = dst_len4 < src_len3 ? dst_len4 : src_len3;\n  uint8_t* d = dst.ptr;\n  uint8_t* s = src.ptr;\n  size_t n = len;\n\n  // TODO: unroll.\n\n  while (n >= 1) {\n    wuffs_base__store_u32le__no_bounds_check(\n        d + (0 * 4),\n        0xFF000000 | wuffs_base__load_u24le__no_bounds_check(s + (0 * 3)));\n\n    s += 1 * 3;\n    d += 1 * 4;\n    n -= 1;\n  }\n\n  return len;\n}\n" +
+	"\nstatic uint64_t  //\nwuffs_base__pixel_swizzler__xxxx__y(wuffs_base__slice_u8 dst,\n                                    wuffs_base__slice_u8 dst_palette,\n                                    wuffs_base__slice_u8 src) {\n  size_t dst_len4 = dst.len / 4;\n  size_t len = dst_len4 < src.len ? dst_len4 : src.len;\n  uint8_t* d = dst.ptr;\n  uint8_t* s = src.ptr;\n  size_t n = len;\n\n  // TODO: unroll.\n\n  while (n >= 1) {\n    wuffs_base__store_u32le__no_bounds_check(\n        d + (0 * 4), 0xFF000000 | (0x010101 * (uint32_t)s[0]));\n\n    s += 1 * 1;\n    d += 1 * 4;\n    n -= 1;\n  }\n\n  return len;\n}\n\n" +
 	"" +
 	"// --------\n\nstatic wuffs_base__pixel_swizzler__func  //\nwuffs_base__pixel_swizzler__prepare__y(wuffs_base__pixel_swizzler* p,\n                                       wuffs_base__pixel_format dst_format,\n                                       wuffs_base__slice_u8 dst_palette,\n                                       wuffs_base__slice_u8 src_palette,\n                                       wuffs_base__pixel_blend blend) {\n  switch (dst_format.repr) {\n    case WUFFS_BASE__PIXEL_FORMAT__BGR_565:\n      // TODO.\n      break;\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGR:\n    case WUFFS_BASE__PIXEL_FORMAT__RGB:\n      // TODO.\n      break;\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_NONPREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_PREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_BINARY:\n    case WUFFS_BASE__PIXEL_FORMAT__BGRX:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_NONPREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_PREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_BINARY:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBX:\n      return wuf" +
 	"fs_base__pixel_swizzler__xxxx__y;\n  }\n  return NULL;\n}\n\nstatic wuffs_base__pixel_swizzler__func  //\nwuffs_base__pixel_swizzler__prepare__indexed__bgra_binary(\n    wuffs_base__pixel_swizzler* p,\n    wuffs_base__pixel_format dst_format,\n    wuffs_base__slice_u8 dst_palette,\n    wuffs_base__slice_u8 src_palette,\n    wuffs_base__pixel_blend blend) {\n  switch (dst_format.repr) {\n    case WUFFS_BASE__PIXEL_FORMAT__INDEXED__BGRA_NONPREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__INDEXED__BGRA_PREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__INDEXED__BGRA_BINARY:\n      if (wuffs_base__slice_u8__copy_from_slice(dst_palette, src_palette) !=\n          1024) {\n        return NULL;\n      }\n      switch (blend) {\n        case WUFFS_BASE__PIXEL_BLEND__SRC:\n          return wuffs_base__pixel_swizzler__copy_1_1;\n      }\n      return NULL;\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGR_565:\n      if (wuffs_base__pixel_swizzler__squash_bgr_565_888(dst_palette,\n                                                         src_palette) != 1024) {\n     " +
-	"   return NULL;\n      }\n      switch (blend) {\n        case WUFFS_BASE__PIXEL_BLEND__SRC:\n          return wuffs_base__pixel_swizzler__xx__index__src;\n      }\n      return NULL;\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGR:\n      if (wuffs_base__slice_u8__copy_from_slice(dst_palette, src_palette) !=\n          1024) {\n        return NULL;\n      }\n      switch (blend) {\n        case WUFFS_BASE__PIXEL_BLEND__SRC:\n          return wuffs_base__pixel_swizzler__xxx__index__src;\n        case WUFFS_BASE__PIXEL_BLEND__SRC_OVER:\n          return wuffs_base__pixel_swizzler__xxx__index_binary_alpha__src_over;\n      }\n      return NULL;\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_NONPREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_PREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_BINARY:\n      if (wuffs_base__slice_u8__copy_from_slice(dst_palette, src_palette) !=\n          1024) {\n        return NULL;\n      }\n      switch (blend) {\n        case WUFFS_BASE__PIXEL_BLEND__SRC:\n          return wuffs_base__pixel_swizzler__xxxx__index__sr" +
-	"c;\n        case WUFFS_BASE__PIXEL_BLEND__SRC_OVER:\n          return wuffs_base__pixel_swizzler__xxxx__index_binary_alpha__src_over;\n      }\n      return NULL;\n\n    case WUFFS_BASE__PIXEL_FORMAT__RGB:\n      if (wuffs_base__pixel_swizzler__swap_rgbx_bgrx(dst_palette,\n                                                     src_palette) != 1024) {\n        return NULL;\n      }\n      switch (blend) {\n        case WUFFS_BASE__PIXEL_BLEND__SRC:\n          return wuffs_base__pixel_swizzler__xxx__index__src;\n        case WUFFS_BASE__PIXEL_BLEND__SRC_OVER:\n          return wuffs_base__pixel_swizzler__xxx__index_binary_alpha__src_over;\n      }\n      return NULL;\n\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_NONPREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_PREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_BINARY:\n      if (wuffs_base__pixel_swizzler__swap_rgbx_bgrx(dst_palette,\n                                                     src_palette) != 1024) {\n        return NULL;\n      }\n      switch (blend) {\n        case WUFFS_BASE__P" +
-	"IXEL_BLEND__SRC:\n          return wuffs_base__pixel_swizzler__xxxx__index__src;\n        case WUFFS_BASE__PIXEL_BLEND__SRC_OVER:\n          return wuffs_base__pixel_swizzler__xxxx__index_binary_alpha__src_over;\n      }\n      return NULL;\n  }\n  return NULL;\n}\n\nstatic wuffs_base__pixel_swizzler__func  //\nwuffs_base__pixel_swizzler__prepare__bgr(wuffs_base__pixel_swizzler* p,\n                                         wuffs_base__pixel_format dst_format,\n                                         wuffs_base__slice_u8 dst_palette,\n                                         wuffs_base__slice_u8 src_palette,\n                                         wuffs_base__pixel_blend blend) {\n  switch (dst_format.repr) {\n    case WUFFS_BASE__PIXEL_FORMAT__BGR_565:\n      // TODO.\n      break;\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGR:\n    case WUFFS_BASE__PIXEL_FORMAT__RGB:\n      // TODO.\n      break;\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_NONPREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_PREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__BGR" +
-	"A_BINARY:\n    case WUFFS_BASE__PIXEL_FORMAT__BGRX:\n      return wuffs_base__pixel_swizzler__xxxx__xxx;\n\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_NONPREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_PREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_BINARY:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBX:\n      // TODO.\n      break;\n  }\n  return NULL;\n}\n\nstatic wuffs_base__pixel_swizzler__func  //\nwuffs_base__pixel_swizzler__prepare__bgra_nonpremul(\n    wuffs_base__pixel_swizzler* p,\n    wuffs_base__pixel_format dst_format,\n    wuffs_base__slice_u8 dst_palette,\n    wuffs_base__slice_u8 src_palette,\n    wuffs_base__pixel_blend blend) {\n  switch (dst_format.repr) {\n    case WUFFS_BASE__PIXEL_FORMAT__BGR_565:\n      // TODO.\n      break;\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGR:\n    case WUFFS_BASE__PIXEL_FORMAT__RGB:\n      // TODO.\n      break;\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_NONPREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__BGRX:\n      switch (blend) {\n        case WUFFS_BASE__PIXEL_BLEND__SRC:\n          return wuffs_base" +
-	"__pixel_swizzler__copy_4_4;\n        case WUFFS_BASE__PIXEL_BLEND__SRC_OVER:\n          return wuffs_base__pixel_swizzler__bgra_nonpremul__bgra_nonpremul__src_over;\n      }\n      return NULL;\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_PREMUL:\n      switch (blend) {\n        case WUFFS_BASE__PIXEL_BLEND__SRC:\n          return wuffs_base__pixel_swizzler__bgra_premul__bgra_nonpremul__src;\n        case WUFFS_BASE__PIXEL_BLEND__SRC_OVER:\n          return wuffs_base__pixel_swizzler__bgra_premul__bgra_nonpremul__src_over;\n      }\n      return NULL;\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_BINARY:\n      // TODO.\n      break;\n\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_NONPREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_PREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_BINARY:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBX:\n      // TODO.\n      break;\n  }\n  return NULL;\n}\n\n" +
+	"   return NULL;\n      }\n      switch (blend) {\n        case WUFFS_BASE__PIXEL_BLEND__SRC:\n          return wuffs_base__pixel_swizzler__bgr_565__index__src;\n      }\n      return NULL;\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGR:\n      if (wuffs_base__slice_u8__copy_from_slice(dst_palette, src_palette) !=\n          1024) {\n        return NULL;\n      }\n      switch (blend) {\n        case WUFFS_BASE__PIXEL_BLEND__SRC:\n          return wuffs_base__pixel_swizzler__xxx__index__src;\n        case WUFFS_BASE__PIXEL_BLEND__SRC_OVER:\n          return wuffs_base__pixel_swizzler__xxx__index_binary_alpha__src_over;\n      }\n      return NULL;\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_NONPREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_PREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_BINARY:\n      if (wuffs_base__slice_u8__copy_from_slice(dst_palette, src_palette) !=\n          1024) {\n        return NULL;\n      }\n      switch (blend) {\n        case WUFFS_BASE__PIXEL_BLEND__SRC:\n          return wuffs_base__pixel_swizzler__xxxx__inde" +
+	"x__src;\n        case WUFFS_BASE__PIXEL_BLEND__SRC_OVER:\n          return wuffs_base__pixel_swizzler__xxxx__index_binary_alpha__src_over;\n      }\n      return NULL;\n\n    case WUFFS_BASE__PIXEL_FORMAT__RGB:\n      if (wuffs_base__pixel_swizzler__swap_rgbx_bgrx(dst_palette,\n                                                     src_palette) != 1024) {\n        return NULL;\n      }\n      switch (blend) {\n        case WUFFS_BASE__PIXEL_BLEND__SRC:\n          return wuffs_base__pixel_swizzler__xxx__index__src;\n        case WUFFS_BASE__PIXEL_BLEND__SRC_OVER:\n          return wuffs_base__pixel_swizzler__xxx__index_binary_alpha__src_over;\n      }\n      return NULL;\n\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_NONPREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_PREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_BINARY:\n      if (wuffs_base__pixel_swizzler__swap_rgbx_bgrx(dst_palette,\n                                                     src_palette) != 1024) {\n        return NULL;\n      }\n      switch (blend) {\n        case WUFFS_BA" +
+	"SE__PIXEL_BLEND__SRC:\n          return wuffs_base__pixel_swizzler__xxxx__index__src;\n        case WUFFS_BASE__PIXEL_BLEND__SRC_OVER:\n          return wuffs_base__pixel_swizzler__xxxx__index_binary_alpha__src_over;\n      }\n      return NULL;\n  }\n  return NULL;\n}\n\nstatic wuffs_base__pixel_swizzler__func  //\nwuffs_base__pixel_swizzler__prepare__bgr(wuffs_base__pixel_swizzler* p,\n                                         wuffs_base__pixel_format dst_format,\n                                         wuffs_base__slice_u8 dst_palette,\n                                         wuffs_base__slice_u8 src_palette,\n                                         wuffs_base__pixel_blend blend) {\n  switch (dst_format.repr) {\n    case WUFFS_BASE__PIXEL_FORMAT__BGR_565:\n      // TODO.\n      break;\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGR:\n      // TODO.\n      break;\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_NONPREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_PREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_BINARY:\n    case WUFFS_BASE__PIXE" +
+	"L_FORMAT__BGRX:\n      return wuffs_base__pixel_swizzler__xxxx__xxx;\n\n    case WUFFS_BASE__PIXEL_FORMAT__RGB:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_NONPREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_PREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_BINARY:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBX:\n      // TODO.\n      break;\n  }\n  return NULL;\n}\n\nstatic wuffs_base__pixel_swizzler__func  //\nwuffs_base__pixel_swizzler__prepare__bgra_nonpremul(\n    wuffs_base__pixel_swizzler* p,\n    wuffs_base__pixel_format dst_format,\n    wuffs_base__slice_u8 dst_palette,\n    wuffs_base__slice_u8 src_palette,\n    wuffs_base__pixel_blend blend) {\n  switch (dst_format.repr) {\n    case WUFFS_BASE__PIXEL_FORMAT__BGR_565:\n      // TODO.\n      break;\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGR:\n      // TODO.\n      break;\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_NONPREMUL:\n      switch (blend) {\n        case WUFFS_BASE__PIXEL_BLEND__SRC:\n          return wuffs_base__pixel_swizzler__copy_4_4;\n        case WUFFS_BASE__PIXEL_BLEND__SRC_OVER:\n" +
+	"          return wuffs_base__pixel_swizzler__bgra_nonpremul__bgra_nonpremul__src_over;\n      }\n      return NULL;\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_PREMUL:\n      switch (blend) {\n        case WUFFS_BASE__PIXEL_BLEND__SRC:\n          return wuffs_base__pixel_swizzler__bgra_premul__bgra_nonpremul__src;\n        case WUFFS_BASE__PIXEL_BLEND__SRC_OVER:\n          return wuffs_base__pixel_swizzler__bgra_premul__bgra_nonpremul__src_over;\n      }\n      return NULL;\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_BINARY:\n    case WUFFS_BASE__PIXEL_FORMAT__BGRX:\n      // TODO.\n      break;\n\n    case WUFFS_BASE__PIXEL_FORMAT__RGB:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_NONPREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_PREMUL:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBA_BINARY:\n    case WUFFS_BASE__PIXEL_FORMAT__RGBX:\n      // TODO.\n      break;\n  }\n  return NULL;\n}\n\n" +
 	"" +
 	"// --------\n\nwuffs_base__status  //\nwuffs_base__pixel_swizzler__prepare(wuffs_base__pixel_swizzler* p,\n                                    wuffs_base__pixel_format dst_format,\n                                    wuffs_base__slice_u8 dst_palette,\n                                    wuffs_base__pixel_format src_format,\n                                    wuffs_base__slice_u8 src_palette,\n                                    wuffs_base__pixel_blend blend) {\n  if (!p) {\n    return wuffs_base__make_status(wuffs_base__error__bad_receiver);\n  }\n\n  // TODO: support many more formats.\n\n  wuffs_base__pixel_swizzler__func func = NULL;\n\n  switch (src_format.repr) {\n    case WUFFS_BASE__PIXEL_FORMAT__Y:\n      func = wuffs_base__pixel_swizzler__prepare__y(p, dst_format, dst_palette,\n                                                    src_palette, blend);\n      break;\n\n    case WUFFS_BASE__PIXEL_FORMAT__INDEXED__BGRA_BINARY:\n      func = wuffs_base__pixel_swizzler__prepare__indexed__bgra_binary(\n          p, dst_format, dst_" +
 	"palette, src_palette, blend);\n      break;\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGR:\n      func = wuffs_base__pixel_swizzler__prepare__bgr(\n          p, dst_format, dst_palette, src_palette, blend);\n      break;\n\n    case WUFFS_BASE__PIXEL_FORMAT__BGRA_NONPREMUL:\n      func = wuffs_base__pixel_swizzler__prepare__bgra_nonpremul(\n          p, dst_format, dst_palette, src_palette, blend);\n      break;\n  }\n\n  p->private_impl.func = func;\n  return wuffs_base__make_status(\n      func ? NULL : wuffs_base__error__unsupported_pixel_swizzler_option);\n}\n\nuint64_t  //\nwuffs_base__pixel_swizzler__swizzle_interleaved(\n    const wuffs_base__pixel_swizzler* p,\n    wuffs_base__slice_u8 dst,\n    wuffs_base__slice_u8 dst_palette,\n    wuffs_base__slice_u8 src) {\n  if (p && p->private_impl.func) {\n    return (*p->private_impl.func)(dst, dst_palette, src);\n  }\n  return 0;\n}\n" +
@@ -235,9 +244,10 @@
 	""
 
 const baseImagePublicH = "" +
-	"// ---------------- Images\n\n// wuffs_base__color_u32_argb_premul is an 8 bit per channel premultiplied\n// Alpha, Red, Green, Blue color, as a uint32_t value. Its value is always\n// 0xAARRGGBB (Alpha most significant, Blue least), regardless of endianness.\ntypedef uint32_t wuffs_base__color_u32_argb_premul;\n\nstatic inline uint8_t  //\nwuffs_base__color_u32_argb_premul__as_gray(\n    wuffs_base__color_u32_argb_premul c) {\n  // Work in 16-bit color.\n  uint32_t cr = 0x101 * (0xFF & (c >> 16));\n  uint32_t cg = 0x101 * (0xFF & (c >> 8));\n  uint32_t cb = 0x101 * (0xFF & (c >> 0));\n\n  // These coefficients (the fractions 0.299, 0.587 and 0.114) are the same\n  // as those given by the JFIF specification.\n  //\n  // Note that 19595 + 38470 + 7471 equals 65536, also known as (1 << 16). We\n  // shift by 24, not just by 16, because the return value is 8-bit color, not\n  // 16-bit color.\n  return ((19595 * cr) + (38470 * cg) + (7471 * cb) + 32768) >> 24;\n}\n\n// wuffs_base__premul_u32_axxx converts from non-premultiplied alpha " +
-	"to\n// premultiplied alpha. The \"axxx\" means either \"argb\" or \"abgr\".\nstatic inline uint32_t  //\nwuffs_base__premul_u32_axxx(uint32_t nonpremul) {\n  // Multiplying by 0x101 (twice, once for alpha and once for color) converts\n  // from 8-bit to 16-bit color. Shifting right by 8 undoes that.\n  //\n  // Working in the higher bit depth can produce slightly different (and\n  // arguably slightly more accurate) results. For example, given 8-bit blue\n  // and alpha of 0x80 and 0x81:\n  //\n  //  - ((0x80   * 0x81  ) / 0xFF  )      = 0x40        = 0x40\n  //  - ((0x8080 * 0x8181) / 0xFFFF) >> 8 = 0x4101 >> 8 = 0x41\n  uint32_t a = 0xFF & (nonpremul >> 24);\n  uint32_t a16 = a * (0x101 * 0x101);\n\n  uint32_t r = 0xFF & (nonpremul >> 16);\n  r = ((r * a16) / 0xFFFF) >> 8;\n  uint32_t g = 0xFF & (nonpremul >> 8);\n  g = ((g * a16) / 0xFFFF) >> 8;\n  uint32_t b = 0xFF & (nonpremul >> 0);\n  b = ((b * a16) / 0xFFFF) >> 8;\n\n  return (a << 24) | (r << 16) | (g << 8) | (b << 0);\n}\n\n// wuffs_base__nonpremul_u32_axxx converts from premultip" +
-	"lied alpha to\n// non-premultiplied alpha. The \"axxx\" means either \"argb\" or \"abgr\".\nstatic inline uint32_t  //\nwuffs_base__nonpremul_u32_axxx(uint32_t premul) {\n  uint32_t a = 0xFF & (premul >> 24);\n  if (a == 0xFF) {\n    return premul;\n  } else if (a == 0) {\n    return 0;\n  }\n  uint32_t a16 = a * 0x101;\n\n  uint32_t r = 0xFF & (premul >> 16);\n  r = ((r * (0x101 * 0xFFFF)) / a16) >> 8;\n  uint32_t g = 0xFF & (premul >> 8);\n  g = ((g * (0x101 * 0xFFFF)) / a16) >> 8;\n  uint32_t b = 0xFF & (premul >> 0);\n  b = ((b * (0x101 * 0xFFFF)) / a16) >> 8;\n\n  return (a << 24) | (r << 16) | (g << 8) | (b << 0);\n}\n\n" +
+	"// ---------------- Images\n\n// wuffs_base__color_u32_argb_premul is an 8 bit per channel premultiplied\n// Alpha, Red, Green, Blue color, as a uint32_t value. Its value is always\n// 0xAARRGGBB (Alpha most significant, Blue least), regardless of endianness.\ntypedef uint32_t wuffs_base__color_u32_argb_premul;\n\nstatic inline uint16_t  //\nwuffs_base__color_u32_argb_premul__as__color_u16_rgb_565(\n    wuffs_base__color_u32_argb_premul c) {\n  uint32_t r5 = 0xF800 & (c >> 8);\n  uint32_t g6 = 0x07E0 & (c >> 5);\n  uint32_t b5 = 0x001F & (c >> 3);\n  return (uint16_t)(r5 | g6 | b5);\n}\n\nstatic inline wuffs_base__color_u32_argb_premul  //\nwuffs_base__color_u16_rgb_565__as__color_u32_argb_premul(uint16_t c) {\n  uint32_t b5 = 0x1F & (c >> 0);\n  uint32_t b = (b5 << 3) | (b5 >> 2);\n  uint32_t g6 = 0x3F & (c >> 5);\n  uint32_t g = (g6 << 2) | (g6 >> 4);\n  uint32_t r5 = 0x1F & (c >> 11);\n  uint32_t r = (r5 << 3) | (r5 >> 2);\n  return 0xFF000000 | (r << 16) | (g << 8) | (b << 0);\n}\n\nstatic inline uint8_t  //\nwuffs_base__color_u32_a" +
+	"rgb_premul__as_gray(\n    wuffs_base__color_u32_argb_premul c) {\n  // Work in 16-bit color.\n  uint32_t cr = 0x101 * (0xFF & (c >> 16));\n  uint32_t cg = 0x101 * (0xFF & (c >> 8));\n  uint32_t cb = 0x101 * (0xFF & (c >> 0));\n\n  // These coefficients (the fractions 0.299, 0.587 and 0.114) are the same\n  // as those given by the JFIF specification.\n  //\n  // Note that 19595 + 38470 + 7471 equals 65536, also known as (1 << 16). We\n  // shift by 24, not just by 16, because the return value is 8-bit color, not\n  // 16-bit color.\n  uint32_t weighted_average = (19595 * cr) + (38470 * cg) + (7471 * cb) + 32768;\n  return (uint8_t)(weighted_average >> 24);\n}\n\n// wuffs_base__premul_u32_axxx converts from non-premultiplied alpha to\n// premultiplied alpha. The \"axxx\" means either \"argb\" or \"abgr\".\nstatic inline uint32_t  //\nwuffs_base__premul_u32_axxx(uint32_t nonpremul) {\n  // Multiplying by 0x101 (twice, once for alpha and once for color) converts\n  // from 8-bit to 16-bit color. Shifting right by 8 undoes that.\n  //\n  // W" +
+	"orking in the higher bit depth can produce slightly different (and\n  // arguably slightly more accurate) results. For example, given 8-bit blue\n  // and alpha of 0x80 and 0x81:\n  //\n  //  - ((0x80   * 0x81  ) / 0xFF  )      = 0x40        = 0x40\n  //  - ((0x8080 * 0x8181) / 0xFFFF) >> 8 = 0x4101 >> 8 = 0x41\n  uint32_t a = 0xFF & (nonpremul >> 24);\n  uint32_t a16 = a * (0x101 * 0x101);\n\n  uint32_t r = 0xFF & (nonpremul >> 16);\n  r = ((r * a16) / 0xFFFF) >> 8;\n  uint32_t g = 0xFF & (nonpremul >> 8);\n  g = ((g * a16) / 0xFFFF) >> 8;\n  uint32_t b = 0xFF & (nonpremul >> 0);\n  b = ((b * a16) / 0xFFFF) >> 8;\n\n  return (a << 24) | (r << 16) | (g << 8) | (b << 0);\n}\n\n// wuffs_base__nonpremul_u32_axxx converts from premultiplied alpha to\n// non-premultiplied alpha. The \"axxx\" means either \"argb\" or \"abgr\".\nstatic inline uint32_t  //\nwuffs_base__nonpremul_u32_axxx(uint32_t premul) {\n  uint32_t a = 0xFF & (premul >> 24);\n  if (a == 0xFF) {\n    return premul;\n  } else if (a == 0) {\n    return 0;\n  }\n  uint32_t a16 = a * 0x" +
+	"101;\n\n  uint32_t r = 0xFF & (premul >> 16);\n  r = ((r * (0x101 * 0xFFFF)) / a16) >> 8;\n  uint32_t g = 0xFF & (premul >> 8);\n  g = ((g * (0x101 * 0xFFFF)) / a16) >> 8;\n  uint32_t b = 0xFF & (premul >> 0);\n  b = ((b * (0x101 * 0xFFFF)) / a16) >> 8;\n\n  return (a << 24) | (r << 16) | (g << 8) | (b << 0);\n}\n\n" +
 	"" +
 	"// --------\n\ntypedef uint8_t wuffs_base__pixel_blend;\n\n// wuffs_base__pixel_blend encodes how to blend source and destination pixels,\n// accounting for transparency. It encompasses the Porter-Duff compositing\n// operators as well as the other blending modes defined by PDF.\n//\n// TODO: implement the other modes.\n#define WUFFS_BASE__PIXEL_BLEND__SRC ((wuffs_base__pixel_blend)0)\n#define WUFFS_BASE__PIXEL_BLEND__SRC_OVER ((wuffs_base__pixel_blend)1)\n\n" +
 	"" +
diff --git a/release/c/wuffs-unsupported-snapshot.c b/release/c/wuffs-unsupported-snapshot.c
index 876c908..5e0fe7e 100644
--- a/release/c/wuffs-unsupported-snapshot.c
+++ b/release/c/wuffs-unsupported-snapshot.c
@@ -2507,6 +2507,26 @@
 // 0xAARRGGBB (Alpha most significant, Blue least), regardless of endianness.
 typedef uint32_t wuffs_base__color_u32_argb_premul;
 
+static inline uint16_t  //
+wuffs_base__color_u32_argb_premul__as__color_u16_rgb_565(
+    wuffs_base__color_u32_argb_premul c) {
+  uint32_t r5 = 0xF800 & (c >> 8);
+  uint32_t g6 = 0x07E0 & (c >> 5);
+  uint32_t b5 = 0x001F & (c >> 3);
+  return (uint16_t)(r5 | g6 | b5);
+}
+
+static inline wuffs_base__color_u32_argb_premul  //
+wuffs_base__color_u16_rgb_565__as__color_u32_argb_premul(uint16_t c) {
+  uint32_t b5 = 0x1F & (c >> 0);
+  uint32_t b = (b5 << 3) | (b5 >> 2);
+  uint32_t g6 = 0x3F & (c >> 5);
+  uint32_t g = (g6 << 2) | (g6 >> 4);
+  uint32_t r5 = 0x1F & (c >> 11);
+  uint32_t r = (r5 << 3) | (r5 >> 2);
+  return 0xFF000000 | (r << 16) | (g << 8) | (b << 0);
+}
+
 static inline uint8_t  //
 wuffs_base__color_u32_argb_premul__as_gray(
     wuffs_base__color_u32_argb_premul c) {
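The two helpers added above replace the open-coded 5/6/5 packing and unpacking in the pixel_buffer color accessors changed further down. A round-trip sketch, illustrative only; the exact values follow from the shifts above, and the include path is an assumption:

    #include <stdio.h>
    #include "wuffs-unsupported-snapshot.c"  // header-only use of the inline helpers

    int main() {
      wuffs_base__color_u32_argb_premul c = 0xFF336699;
      uint16_t rgb565 =
          wuffs_base__color_u32_argb_premul__as__color_u16_rgb_565(c);
      wuffs_base__color_u32_argb_premul back =
          wuffs_base__color_u16_rgb_565__as__color_u32_argb_premul(rgb565);
      printf("0x%08X -> 0x%04X -> 0x%08X\n", (unsigned)c, (unsigned)rgb565,
             (unsigned)back);
      // Prints 0xFF336699 -> 0x3333 -> 0xFF31659C. The bits dropped by the
      // 565 quantization are approximated on the way back by replicating each
      // channel's high bits into its low bits.
      return 0;
    }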
@@ -2521,7 +2541,8 @@
   // Note that 19595 + 38470 + 7471 equals 65536, also known as (1 << 16). We
   // shift by 24, not just by 16, because the return value is 8-bit color, not
   // 16-bit color.
-  return ((19595 * cr) + (38470 * cg) + (7471 * cb) + 32768) >> 24;
+  uint32_t weighted_average = (19595 * cr) + (38470 * cg) + (7471 * cb) + 32768;
+  return (uint8_t)(weighted_average >> 24);
 }
 
 // wuffs_base__premul_u32_axxx converts from non-premultiplied alpha to
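A spot check of the integer weights in wuffs_base__color_u32_argb_premul__as_gray above: for opaque mid-gray 0xFF808080, cr = cg = cb = 0x101 * 0x80 = 32896, so weighted_average = 65536 * 32896 + 32768 = 2155905024 and 2155905024 >> 24 = 128. The largest possible sum, 65536 * 65535 + 32768 = 4294934528 for opaque white, still fits in a uint32_t and shifts down to 255, which is why the new explicit cast to uint8_t never truncates anything meaningful.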
@@ -8506,6 +8527,52 @@
     0x08, 0x0A, 0x0C, 0x10, 0x18, 0x20, 0x30, 0x40,
 };
 
+// --------
+
+static uint64_t  //
+wuffs_base__pixel_swizzler__squash_bgr_565_888(wuffs_base__slice_u8 dst,
+                                               wuffs_base__slice_u8 src) {
+  size_t len4 = (dst.len < src.len ? dst.len : src.len) / 4;
+  uint8_t* d = dst.ptr;
+  uint8_t* s = src.ptr;
+
+  size_t n = len4;
+  while (n--) {
+    uint32_t argb = wuffs_base__load_u32le__no_bounds_check(s);
+    uint32_t b5 = 0x1F & (argb >> (8 - 5));
+    uint32_t g6 = 0x3F & (argb >> (16 - 6));
+    uint32_t r5 = 0x1F & (argb >> (24 - 5));
+    wuffs_base__store_u32le__no_bounds_check(
+        d, (r5 << 11) | (g6 << 5) | (b5 << 0));
+    s += 4;
+    d += 4;
+  }
+  return len4 * 4;
+}
+
+static uint64_t  //
+wuffs_base__pixel_swizzler__swap_rgbx_bgrx(wuffs_base__slice_u8 dst,
+                                           wuffs_base__slice_u8 src) {
+  size_t len4 = (dst.len < src.len ? dst.len : src.len) / 4;
+  uint8_t* d = dst.ptr;
+  uint8_t* s = src.ptr;
+
+  size_t n = len4;
+  while (n--) {
+    uint8_t b0 = s[0];
+    uint8_t b1 = s[1];
+    uint8_t b2 = s[2];
+    uint8_t b3 = s[3];
+    d[0] = b2;
+    d[1] = b1;
+    d[2] = b0;
+    d[3] = b3;
+    s += 4;
+    d += 4;
+  }
+  return len4 * 4;
+}
+
 static inline uint32_t  //
 wuffs_base__swap_u32_argb_abgr(uint32_t u) {
   uint32_t o = u & 0xFF00FF00;
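Both helpers above convert a 256-entry, 1024-byte palette while keeping its 4-byte stride. wuffs_base__pixel_swizzler__squash_bgr_565_888 turns each BGRA entry into the corresponding BGR_565 value held in the low 16 bits of its 4-byte slot (for example, opaque red 0xFFFF0000 squashes to 0x0000F800), which is what lets the bgr_565__index__src swizzler below fetch entries with a single 16-bit load at offset 4 * index. wuffs_base__pixel_swizzler__swap_rgbx_bgrx swaps the R and B bytes of each entry, so the BGR-oriented xxx and xxxx swizzlers can also serve RGB and RGBA destinations from the same palette-indexed source.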
@@ -8514,6 +8581,8 @@
   return o | (r >> 16) | (b << 16);
 }
 
+// --------
+
 static inline uint32_t  //
 wuffs_base__composite_nonpremul_nonpremul_u32_axxx(uint32_t dst_nonpremul,
                                                    uint32_t src_nonpremul) {
@@ -8658,6 +8727,8 @@
   return (db << 0) | (dg << 8) | (dr << 16) | (da << 24);
 }
 
+// --------
+
 wuffs_base__color_u32_argb_premul  //
 wuffs_base__pixel_buffer__color_u32_at(const wuffs_base__pixel_buffer* pb,
                                        uint32_t x,
@@ -8699,17 +8770,9 @@
                                                   (4 * ((size_t)row[x]))));
     }
 
-    case WUFFS_BASE__PIXEL_FORMAT__BGR_565: {
-      uint16_t bgr =
-          wuffs_base__load_u16le__no_bounds_check(row + (2 * ((size_t)x)));
-      uint32_t b5 = 0x1F & (bgr >> 0);
-      uint32_t b = (b5 << 3) | (b5 >> 2);
-      uint32_t g6 = 0x3F & (bgr >> 5);
-      uint32_t g = (g6 << 2) | (g6 >> 4);
-      uint32_t r5 = 0x1F & (bgr >> 11);
-      uint32_t r = (r5 << 3) | (r5 >> 2);
-      return 0xFF000000 | (r << 16) | (g << 8) | (b << 0);
-    }
+    case WUFFS_BASE__PIXEL_FORMAT__BGR_565:
+      return wuffs_base__color_u16_rgb_565__as__color_u32_argb_premul(
+          wuffs_base__load_u16le__no_bounds_check(row + (2 * ((size_t)x))));
     case WUFFS_BASE__PIXEL_FORMAT__BGR:
       return 0xFF000000 |
              wuffs_base__load_u24le__no_bounds_check(row + (3 * ((size_t)x)));
@@ -8786,15 +8849,11 @@
                                  pb->pixcfg.private_impl.pixfmt, color));
       break;
 
-    case WUFFS_BASE__PIXEL_FORMAT__BGR_565: {
-      uint32_t b5 = 0x1F & (color >> (8 - 5));
-      uint32_t g6 = 0x3F & (color >> (16 - 6));
-      uint32_t r5 = 0x1F & (color >> (24 - 5));
-      uint32_t bgr565 = (b5 << 0) | (g6 << 5) | (r5 << 11);
-      wuffs_base__store_u16le__no_bounds_check(row + (2 * ((size_t)x)),
-                                               (uint16_t)bgr565);
+    case WUFFS_BASE__PIXEL_FORMAT__BGR_565:
+      wuffs_base__store_u16le__no_bounds_check(
+          row + (2 * ((size_t)x)),
+          wuffs_base__color_u32_argb_premul__as__color_u16_rgb_565(color));
       break;
-    }
     case WUFFS_BASE__PIXEL_FORMAT__BGR:
       wuffs_base__store_u24le__no_bounds_check(row + (3 * ((size_t)x)), color);
       break;
@@ -8813,7 +8872,6 @@
                                        wuffs_base__swap_u32_argb_abgr(color)));
       break;
     case WUFFS_BASE__PIXEL_FORMAT__RGBA_PREMUL:
-      WUFFS_BASE__FALLTHROUGH;
     case WUFFS_BASE__PIXEL_FORMAT__RGBX:
       wuffs_base__store_u32le__no_bounds_check(
           row + (4 * ((size_t)x)), wuffs_base__swap_u32_argb_abgr(color));
@@ -8886,96 +8944,12 @@
     }
   }
 
-  return best_index;
+  return (uint8_t)best_index;
 }
 
 // --------
 
 static uint64_t  //
-wuffs_base__pixel_swizzler__bgra_nonpremul__bgra_nonpremul__src_over(
-    wuffs_base__slice_u8 dst,
-    wuffs_base__slice_u8 dst_palette,
-    wuffs_base__slice_u8 src) {
-  size_t dst_len4 = dst.len / 4;
-  size_t src_len4 = src.len / 4;
-  size_t len = dst_len4 < src_len4 ? dst_len4 : src_len4;
-  uint8_t* d = dst.ptr;
-  uint8_t* s = src.ptr;
-  size_t n = len;
-
-  // TODO: unroll.
-
-  while (n >= 1) {
-    uint32_t d0 = wuffs_base__load_u32le__no_bounds_check(d + (0 * 4));
-    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(s + (0 * 4));
-    wuffs_base__store_u32le__no_bounds_check(
-        d + (0 * 4),
-        wuffs_base__composite_nonpremul_nonpremul_u32_axxx(d0, s0));
-
-    s += 1 * 4;
-    d += 1 * 4;
-    n -= 1;
-  }
-
-  return len;
-}
-
-static uint64_t  //
-wuffs_base__pixel_swizzler__bgra_premul__bgra_nonpremul__src(
-    wuffs_base__slice_u8 dst,
-    wuffs_base__slice_u8 dst_palette,
-    wuffs_base__slice_u8 src) {
-  size_t dst_len4 = dst.len / 4;
-  size_t src_len4 = src.len / 4;
-  size_t len = dst_len4 < src_len4 ? dst_len4 : src_len4;
-  uint8_t* d = dst.ptr;
-  uint8_t* s = src.ptr;
-  size_t n = len;
-
-  // TODO: unroll.
-
-  while (n >= 1) {
-    uint32_t s0 = wuffs_base__premul_u32_axxx(
-        wuffs_base__load_u32le__no_bounds_check(s + (0 * 4)));
-    wuffs_base__store_u32le__no_bounds_check(d + (0 * 4), s0);
-
-    s += 1 * 4;
-    d += 1 * 4;
-    n -= 1;
-  }
-
-  return len;
-}
-
-static uint64_t  //
-wuffs_base__pixel_swizzler__bgra_premul__bgra_nonpremul__src_over(
-    wuffs_base__slice_u8 dst,
-    wuffs_base__slice_u8 dst_palette,
-    wuffs_base__slice_u8 src) {
-  size_t dst_len4 = dst.len / 4;
-  size_t src_len4 = src.len / 4;
-  size_t len = dst_len4 < src_len4 ? dst_len4 : src_len4;
-  uint8_t* d = dst.ptr;
-  uint8_t* s = src.ptr;
-  size_t n = len;
-
-  // TODO: unroll.
-
-  while (n >= 1) {
-    uint32_t d0 = wuffs_base__load_u32le__no_bounds_check(d + (0 * 4));
-    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(s + (0 * 4));
-    wuffs_base__store_u32le__no_bounds_check(
-        d + (0 * 4), wuffs_base__composite_premul_nonpremul_u32_axxx(d0, s0));
-
-    s += 1 * 4;
-    d += 1 * 4;
-    n -= 1;
-  }
-
-  return len;
-}
-
-static uint64_t  //
 wuffs_base__pixel_swizzler__copy_1_1(wuffs_base__slice_u8 dst,
                                      wuffs_base__slice_u8 dst_palette,
                                      wuffs_base__slice_u8 src) {
@@ -8995,10 +8969,13 @@
   return len;
 }
 
+// --------
+
 static uint64_t  //
-wuffs_base__pixel_swizzler__xx__index__src(wuffs_base__slice_u8 dst,
-                                           wuffs_base__slice_u8 dst_palette,
-                                           wuffs_base__slice_u8 src) {
+wuffs_base__pixel_swizzler__bgr_565__index__src(
+    wuffs_base__slice_u8 dst,
+    wuffs_base__slice_u8 dst_palette,
+    wuffs_base__slice_u8 src) {
   if (dst_palette.len != 1024) {
     return 0;
   }
@@ -9042,6 +9019,96 @@
   return len;
 }
 
+// --------
+
+static uint64_t  //
+wuffs_base__pixel_swizzler__bgra_nonpremul__bgra_nonpremul__src_over(
+    wuffs_base__slice_u8 dst,
+    wuffs_base__slice_u8 dst_palette,
+    wuffs_base__slice_u8 src) {
+  size_t dst_len4 = dst.len / 4;
+  size_t src_len4 = src.len / 4;
+  size_t len = dst_len4 < src_len4 ? dst_len4 : src_len4;
+  uint8_t* d = dst.ptr;
+  uint8_t* s = src.ptr;
+  size_t n = len;
+
+  // TODO: unroll.
+
+  while (n >= 1) {
+    uint32_t d0 = wuffs_base__load_u32le__no_bounds_check(d + (0 * 4));
+    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(s + (0 * 4));
+    wuffs_base__store_u32le__no_bounds_check(
+        d + (0 * 4),
+        wuffs_base__composite_nonpremul_nonpremul_u32_axxx(d0, s0));
+
+    s += 1 * 4;
+    d += 1 * 4;
+    n -= 1;
+  }
+
+  return len;
+}
+
+// --------
+
+static uint64_t  //
+wuffs_base__pixel_swizzler__bgra_premul__bgra_nonpremul__src(
+    wuffs_base__slice_u8 dst,
+    wuffs_base__slice_u8 dst_palette,
+    wuffs_base__slice_u8 src) {
+  size_t dst_len4 = dst.len / 4;
+  size_t src_len4 = src.len / 4;
+  size_t len = dst_len4 < src_len4 ? dst_len4 : src_len4;
+  uint8_t* d = dst.ptr;
+  uint8_t* s = src.ptr;
+  size_t n = len;
+
+  // TODO: unroll.
+
+  while (n >= 1) {
+    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(s + (0 * 4));
+    wuffs_base__store_u32le__no_bounds_check(d + (0 * 4),
+                                             wuffs_base__premul_u32_axxx(s0));
+
+    s += 1 * 4;
+    d += 1 * 4;
+    n -= 1;
+  }
+
+  return len;
+}
+
+static uint64_t  //
+wuffs_base__pixel_swizzler__bgra_premul__bgra_nonpremul__src_over(
+    wuffs_base__slice_u8 dst,
+    wuffs_base__slice_u8 dst_palette,
+    wuffs_base__slice_u8 src) {
+  size_t dst_len4 = dst.len / 4;
+  size_t src_len4 = src.len / 4;
+  size_t len = dst_len4 < src_len4 ? dst_len4 : src_len4;
+  uint8_t* d = dst.ptr;
+  uint8_t* s = src.ptr;
+  size_t n = len;
+
+  // TODO: unroll.
+
+  while (n >= 1) {
+    uint32_t d0 = wuffs_base__load_u32le__no_bounds_check(d + (0 * 4));
+    uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(s + (0 * 4));
+    wuffs_base__store_u32le__no_bounds_check(
+        d + (0 * 4), wuffs_base__composite_premul_nonpremul_u32_axxx(d0, s0));
+
+    s += 1 * 4;
+    d += 1 * 4;
+    n -= 1;
+  }
+
+  return len;
+}
+
+// --------
+
 static uint64_t  //
 wuffs_base__pixel_swizzler__xxx__index__src(wuffs_base__slice_u8 dst,
                                             wuffs_base__slice_u8 dst_palette,
@@ -9057,12 +9124,13 @@
 
   const size_t loop_unroll_count = 4;
 
-  // The comparison in the while condition is ">", not ">=", because with ">=",
-  // the last 4-byte store could write past the end of the dst slice.
+  // The comparison in the while condition is ">", not ">=", because with
+  // ">=", the last 4-byte store could write past the end of the dst slice.
   //
-  // Each 4-byte store writes one too many bytes, but a subsequent store will
-  // overwrite that with the correct byte. There is always another store,
-  // whether a 4-byte store in this loop or a 1-byte store in the next loop.
+  // Each 4-byte store writes one too many bytes, but a subsequent store
+  // will overwrite that with the correct byte. There is always another
+  // store, whether a 4-byte store in this loop or a 1-byte store in the
+  // next loop.
   while (n > loop_unroll_count) {
     wuffs_base__store_u32le__no_bounds_check(
         d + (0 * 3), wuffs_base__load_u32le__no_bounds_check(
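To make the re-wrapped comment concrete: with 3-byte pixels and n == 6, the unrolled pass handles pixels 0 to 3 and its four 4-byte stores touch bytes 0 through 12, where byte 12 is the one-byte overshoot; the tail loop then writes pixels 4 and 5 with exact stores at bytes 12-14 and 15-17, overwriting the scratch byte. Had the guard been ">=", n == 4 with a 12-byte dst would run the unrolled pass with no tail, and the store for pixel 3 would write byte 12, one past the end. The index_binary_alpha variant that follows keeps ">=" because all of its stores are exact 24-bit stores.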
@@ -9085,9 +9153,7 @@
   while (n >= 1) {
     uint32_t s0 = wuffs_base__load_u32le__no_bounds_check(dst_palette.ptr +
                                                           ((size_t)s[0] * 4));
-    d[0] = (uint8_t)(s0 >> 0);
-    d[1] = (uint8_t)(s0 >> 8);
-    d[2] = (uint8_t)(s0 >> 16);
+    wuffs_base__store_u24le__no_bounds_check(d + (0 * 3), s0);
 
     s += 1 * 1;
     d += 1 * 3;
@@ -9155,6 +9221,8 @@
   return len;
 }
 
+// --------
+
 static uint64_t  //
 wuffs_base__pixel_swizzler__xxxx__index__src(wuffs_base__slice_u8 dst,
                                              wuffs_base__slice_u8 dst_palette,
@@ -9312,52 +9380,6 @@
 
 // --------
 
-static uint64_t  //
-wuffs_base__pixel_swizzler__squash_bgr_565_888(wuffs_base__slice_u8 dst,
-                                               wuffs_base__slice_u8 src) {
-  size_t len4 = (dst.len < src.len ? dst.len : src.len) / 4;
-  uint8_t* d = dst.ptr;
-  uint8_t* s = src.ptr;
-
-  size_t n = len4;
-  while (n--) {
-    uint32_t argb = wuffs_base__load_u32le__no_bounds_check(s);
-    uint32_t b5 = 0x1F & (argb >> (8 - 5));
-    uint32_t g6 = 0x3F & (argb >> (16 - 6));
-    uint32_t r5 = 0x1F & (argb >> (24 - 5));
-    wuffs_base__store_u32le__no_bounds_check(
-        d, (b5 << 0) | (g6 << 5) | (r5 << 11));
-    s += 4;
-    d += 4;
-  }
-  return len4 * 4;
-}
-
-static uint64_t  //
-wuffs_base__pixel_swizzler__swap_rgbx_bgrx(wuffs_base__slice_u8 dst,
-                                           wuffs_base__slice_u8 src) {
-  size_t len4 = (dst.len < src.len ? dst.len : src.len) / 4;
-  uint8_t* d = dst.ptr;
-  uint8_t* s = src.ptr;
-
-  size_t n = len4;
-  while (n--) {
-    uint8_t b0 = s[0];
-    uint8_t b1 = s[1];
-    uint8_t b2 = s[2];
-    uint8_t b3 = s[3];
-    d[0] = b2;
-    d[1] = b1;
-    d[2] = b0;
-    d[3] = b3;
-    s += 4;
-    d += 4;
-  }
-  return len4 * 4;
-}
-
-// --------
-
 static wuffs_base__pixel_swizzler__func  //
 wuffs_base__pixel_swizzler__prepare__y(wuffs_base__pixel_swizzler* p,
                                        wuffs_base__pixel_format dst_format,
@@ -9415,7 +9437,7 @@
       }
       switch (blend) {
         case WUFFS_BASE__PIXEL_BLEND__SRC:
-          return wuffs_base__pixel_swizzler__xx__index__src;
+          return wuffs_base__pixel_swizzler__bgr_565__index__src;
       }
       return NULL;
 
@@ -9490,7 +9512,6 @@
       break;
 
     case WUFFS_BASE__PIXEL_FORMAT__BGR:
-    case WUFFS_BASE__PIXEL_FORMAT__RGB:
       // TODO.
       break;
 
@@ -9500,6 +9521,7 @@
     case WUFFS_BASE__PIXEL_FORMAT__BGRX:
       return wuffs_base__pixel_swizzler__xxxx__xxx;
 
+    case WUFFS_BASE__PIXEL_FORMAT__RGB:
     case WUFFS_BASE__PIXEL_FORMAT__RGBA_NONPREMUL:
     case WUFFS_BASE__PIXEL_FORMAT__RGBA_PREMUL:
     case WUFFS_BASE__PIXEL_FORMAT__RGBA_BINARY:
@@ -9523,12 +9545,10 @@
       break;
 
     case WUFFS_BASE__PIXEL_FORMAT__BGR:
-    case WUFFS_BASE__PIXEL_FORMAT__RGB:
       // TODO.
       break;
 
     case WUFFS_BASE__PIXEL_FORMAT__BGRA_NONPREMUL:
-    case WUFFS_BASE__PIXEL_FORMAT__BGRX:
       switch (blend) {
         case WUFFS_BASE__PIXEL_BLEND__SRC:
           return wuffs_base__pixel_swizzler__copy_4_4;
@@ -9547,9 +9567,11 @@
       return NULL;
 
     case WUFFS_BASE__PIXEL_FORMAT__BGRA_BINARY:
+    case WUFFS_BASE__PIXEL_FORMAT__BGRX:
       // TODO.
       break;
 
+    case WUFFS_BASE__PIXEL_FORMAT__RGB:
     case WUFFS_BASE__PIXEL_FORMAT__RGBA_NONPREMUL:
     case WUFFS_BASE__PIXEL_FORMAT__RGBA_PREMUL:
     case WUFFS_BASE__PIXEL_FORMAT__RGBA_BINARY: