Rename copy_n32_etc to limited_copy_u32_etc
diff --git a/internal/cgen/base/io-private.h b/internal/cgen/base/io-private.h
index 23114d0..4bd89f2 100644
--- a/internal/cgen/base/io-private.h
+++ b/internal/cgen/base/io-private.h
@@ -131,11 +131,11 @@
}
static inline uint32_t //
-wuffs_base__io_writer__copy_n32_from_history(uint8_t** ptr_iop_w,
- uint8_t* io1_w,
- uint8_t* io2_w,
- uint32_t length,
- uint32_t distance) {
+wuffs_base__io_writer__limited_copy_u32_from_history(uint8_t** ptr_iop_w,
+ uint8_t* io1_w,
+ uint8_t* io2_w,
+ uint32_t length,
+ uint32_t distance) {
if (!distance) {
return 0;
}
@@ -153,14 +153,14 @@
// TODO: unrolling by 3 seems best for the std/deflate benchmarks, but that
// is mostly because 3 is the minimum length for the deflate format. This
// function implementation shouldn't overfit to that one format. Perhaps the
- // copy_n32_from_history Wuffs method should also take an unroll hint
+ // limited_copy_u32_from_history Wuffs method should also take an unroll hint
// argument, and the cgen can look if that argument is the constant
// expression '3'.
//
- // See also wuffs_base__io_writer__copy_n32_from_history_fast below.
+ // See also wuffs_base__io_writer__limited_copy_u32_from_history_fast below.
//
- // Alternatively, or additionally, have a sloppy_copy_n32_from_history method
- // that copies 8 bytes at a time, possibly writing more than length bytes?
+ // Alternatively or additionally, have a sloppy_limited_copy_u32_from_history
+  // method that copies 8 bytes at a time, which can write more than length bytes?
for (; n >= 3; n -= 3) {
*p++ = *q++;
*p++ = *q++;
@@ -173,18 +173,18 @@
return length;
}
-// wuffs_base__io_writer__copy_n32_from_history_fast is like the
-// wuffs_base__io_writer__copy_n32_from_history function above, but has
+// wuffs_base__io_writer__limited_copy_u32_from_history_fast is like the
+// wuffs_base__io_writer__limited_copy_u32_from_history function above, but has
// stronger pre-conditions. The caller needs to prove that:
// - distance > 0
// - distance <= (*ptr_iop_w - io1_w)
// - length <= (io2_w - *ptr_iop_w)
static inline uint32_t //
-wuffs_base__io_writer__copy_n32_from_history_fast(uint8_t** ptr_iop_w,
- uint8_t* io1_w,
- uint8_t* io2_w,
- uint32_t length,
- uint32_t distance) {
+wuffs_base__io_writer__limited_copy_u32_from_history_fast(uint8_t** ptr_iop_w,
+ uint8_t* io1_w,
+ uint8_t* io2_w,
+ uint32_t length,
+ uint32_t distance) {
uint8_t* p = *ptr_iop_w;
uint8_t* q = p - distance;
uint32_t n = length;
@@ -201,11 +201,11 @@
}
static inline uint32_t //
-wuffs_base__io_writer__copy_n32_from_reader(uint8_t** ptr_iop_w,
- uint8_t* io2_w,
- uint32_t length,
- const uint8_t** ptr_iop_r,
- const uint8_t* io2_r) {
+wuffs_base__io_writer__limited_copy_u32_from_reader(uint8_t** ptr_iop_w,
+ uint8_t* io2_w,
+ uint32_t length,
+ const uint8_t** ptr_iop_r,
+ const uint8_t* io2_r) {
uint8_t* iop_w = *ptr_iop_w;
size_t n = length;
if (n > ((size_t)(io2_w - iop_w))) {
@@ -224,10 +224,10 @@
}
static inline uint32_t //
-wuffs_base__io_writer__copy_n32_from_slice(uint8_t** ptr_iop_w,
- uint8_t* io2_w,
- uint32_t length,
- wuffs_base__slice_u8 src) {
+wuffs_base__io_writer__limited_copy_u32_from_slice(uint8_t** ptr_iop_w,
+ uint8_t* io2_w,
+ uint32_t length,
+ wuffs_base__slice_u8 src) {
uint8_t* iop_w = *ptr_iop_w;
size_t n = src.len;
if (n > length) {
diff --git a/internal/cgen/builtin.go b/internal/cgen/builtin.go
index 5eca912..debf53c 100644
--- a/internal/cgen/builtin.go
+++ b/internal/cgen/builtin.go
@@ -241,12 +241,12 @@
}
switch method {
- case t.IDCopyN32FromHistory, t.IDCopyN32FromHistoryFast:
+ case t.IDLimitedCopyU32FromHistory, t.IDLimitedCopyU32FromHistoryFast:
suffix := ""
- if method == t.IDCopyN32FromHistoryFast {
+ if method == t.IDLimitedCopyU32FromHistoryFast {
suffix = "_fast"
}
- b.printf("wuffs_base__io_writer__copy_n32_from_history%s(&%s%s, %s%s, %s%s",
+ b.printf("wuffs_base__io_writer__limited_copy_u32_from_history%s(&%s%s, %s%s, %s%s",
suffix, iopPrefix, name, io0Prefix, name, io2Prefix, name)
for _, o := range args {
b.writeb(',')
@@ -257,13 +257,13 @@
b.writeb(')')
return nil
- case t.IDCopyN32FromReader:
+ case t.IDLimitedCopyU32FromReader:
readerName, err := g.ioRecvName(args[1].AsArg().Value())
if err != nil {
return err
}
- b.printf("wuffs_base__io_writer__copy_n32_from_reader(&%s%s, %s%s,",
+ b.printf("wuffs_base__io_writer__limited_copy_u32_from_reader(&%s%s, %s%s,",
iopPrefix, name, io2Prefix, name)
if err := g.writeExpr(b, args[0].AsArg().Value(), depth); err != nil {
return err
@@ -276,8 +276,8 @@
iopPrefix, name, io2Prefix, name)
return g.writeArgs(b, args, depth)
- case t.IDCopyN32FromSlice:
- b.printf("wuffs_base__io_writer__copy_n32_from_slice(&%s%s, %s%s,",
+ case t.IDLimitedCopyU32FromSlice:
+ b.printf("wuffs_base__io_writer__limited_copy_u32_from_slice(&%s%s, %s%s,",
iopPrefix, name, io2Prefix, name)
return g.writeArgs(b, args, depth)
diff --git a/internal/cgen/data.go b/internal/cgen/data.go
index 1e6d285..4b871f2 100644
--- a/internal/cgen/data.go
+++ b/internal/cgen/data.go
@@ -331,11 +331,11 @@
" return ((a << shift) == (x << shift)) ? 0 : 2;\n }\n for (; n > 0; n--) {\n if (iop_r >= io2_r) {\n return (r && r->meta.closed) ? 2 : 1;\n } else if (*iop_r != ((uint8_t)(a))) {\n return 2;\n }\n iop_r++;\n a >>= 8;\n }\n return 0;\n}\n\nstatic inline wuffs_base__io_buffer* //\nwuffs_base__io_reader__set(wuffs_base__io_buffer* b,\n const uint8_t** ptr_iop_r,\n const uint8_t** ptr_io0_r,\n const uint8_t** ptr_io1_r,\n const uint8_t** ptr_io2_r,\n wuffs_base__slice_u8 data) {\n b->data = data;\n b->meta.wi = data.len;\n b->meta.ri = 0;\n b->meta.pos = 0;\n b->meta.closed = false;\n\n *ptr_iop_r = data.ptr;\n *ptr_io0_r = data.ptr;\n *ptr_io1_r = data.ptr;\n *ptr_io2_r = data.ptr + data.len;\n\n return b;\n}\n\n#pragma GCC diagnostic push\n#pragma GCC diagnostic ignored \"-Wcast-qual\"\n// TODO: can we avoid the const_cast (by deleting this function)? This might\n// involve conver" +
"ting the call sites to take an io_reader instead of a slice u8\n// (the result of io_reader.take).\nstatic inline wuffs_base__slice_u8 //\nwuffs_base__io_reader__take(const uint8_t** ptr_iop_r,\n const uint8_t* io2_r,\n uint64_t n) {\n if (n <= ((size_t)(io2_r - *ptr_iop_r))) {\n const uint8_t* p = *ptr_iop_r;\n *ptr_iop_r += n;\n // The arg is what C calls C++'s \"const_cast<uint8_t*>(p)\".\n return wuffs_base__make_slice_u8((uint8_t*)(p), n);\n }\n return wuffs_base__make_slice_u8(NULL, 0);\n}\n#pragma GCC diagnostic pop\n\n" +
"" +
- "// --------\n\nstatic inline uint64_t //\nwuffs_base__io_writer__copy_from_slice(uint8_t** ptr_iop_w,\n uint8_t* io2_w,\n wuffs_base__slice_u8 src) {\n uint8_t* iop_w = *ptr_iop_w;\n size_t n = src.len;\n if (n > ((size_t)(io2_w - iop_w))) {\n n = (size_t)(io2_w - iop_w);\n }\n if (n > 0) {\n memmove(iop_w, src.ptr, n);\n *ptr_iop_w += n;\n }\n return (uint64_t)(n);\n}\n\nstatic inline uint32_t //\nwuffs_base__io_writer__copy_n32_from_history(uint8_t** ptr_iop_w,\n uint8_t* io1_w,\n uint8_t* io2_w,\n uint32_t length,\n uint32_t distance) {\n if (!distance) {\n return 0;\n }\n uint8_t* p = *ptr_iop_w;\n if ((size_t)(p - io1_w) < (size_t)(distance)) {\n return 0;\n }\n uint8_t* q = p - distance;\n size_t n = (size_t)(io2_w - p);\n if ((size_t)(length) > n) {\n le" +
- "ngth = (uint32_t)(n);\n } else {\n n = (size_t)(length);\n }\n // TODO: unrolling by 3 seems best for the std/deflate benchmarks, but that\n // is mostly because 3 is the minimum length for the deflate format. This\n // function implementation shouldn't overfit to that one format. Perhaps the\n // copy_n32_from_history Wuffs method should also take an unroll hint\n // argument, and the cgen can look if that argument is the constant\n // expression '3'.\n //\n // See also wuffs_base__io_writer__copy_n32_from_history_fast below.\n //\n // Alternatively, or additionally, have a sloppy_copy_n32_from_history method\n // that copies 8 bytes at a time, possibly writing more than length bytes?\n for (; n >= 3; n -= 3) {\n *p++ = *q++;\n *p++ = *q++;\n *p++ = *q++;\n }\n for (; n; n--) {\n *p++ = *q++;\n }\n *ptr_iop_w = p;\n return length;\n}\n\n// wuffs_base__io_writer__copy_n32_from_history_fast is like the\n// wuffs_base__io_writer__copy_n32_from_history function above, but has\n// stronger pre-conditions. Th" +
- "e caller needs to prove that:\n// - distance > 0\n// - distance <= (*ptr_iop_w - io1_w)\n// - length <= (io2_w - *ptr_iop_w)\nstatic inline uint32_t //\nwuffs_base__io_writer__copy_n32_from_history_fast(uint8_t** ptr_iop_w,\n uint8_t* io1_w,\n uint8_t* io2_w,\n uint32_t length,\n uint32_t distance) {\n uint8_t* p = *ptr_iop_w;\n uint8_t* q = p - distance;\n uint32_t n = length;\n for (; n >= 3; n -= 3) {\n *p++ = *q++;\n *p++ = *q++;\n *p++ = *q++;\n }\n for (; n; n--) {\n *p++ = *q++;\n }\n *ptr_iop_w = p;\n return length;\n}\n\nstatic inline uint32_t //\nwuffs_base__io_writer__copy_n32_from_reader(uint8_t** ptr_iop_w,\n uint8_t* io2_w,\n uint32_t length,\n const uint8_t** ptr_iop_r,\n" +
- " const uint8_t* io2_r) {\n uint8_t* iop_w = *ptr_iop_w;\n size_t n = length;\n if (n > ((size_t)(io2_w - iop_w))) {\n n = (size_t)(io2_w - iop_w);\n }\n const uint8_t* iop_r = *ptr_iop_r;\n if (n > ((size_t)(io2_r - iop_r))) {\n n = (size_t)(io2_r - iop_r);\n }\n if (n > 0) {\n memmove(iop_w, iop_r, n);\n *ptr_iop_w += n;\n *ptr_iop_r += n;\n }\n return (uint32_t)(n);\n}\n\nstatic inline uint32_t //\nwuffs_base__io_writer__copy_n32_from_slice(uint8_t** ptr_iop_w,\n uint8_t* io2_w,\n uint32_t length,\n wuffs_base__slice_u8 src) {\n uint8_t* iop_w = *ptr_iop_w;\n size_t n = src.len;\n if (n > length) {\n n = length;\n }\n if (n > ((size_t)(io2_w - iop_w))) {\n n = (size_t)(io2_w - iop_w);\n }\n if (n > 0) {\n memmove(iop_w, src.ptr, n);\n *ptr_iop_w += n;\n }\n return (uint32_t)(n);\n}\n\nstatic inline wuffs_base__io_buffer* //\nwuffs_b" +
- "ase__io_writer__set(wuffs_base__io_buffer* b,\n uint8_t** ptr_iop_w,\n uint8_t** ptr_io0_w,\n uint8_t** ptr_io1_w,\n uint8_t** ptr_io2_w,\n wuffs_base__slice_u8 data) {\n b->data = data;\n b->meta.wi = 0;\n b->meta.ri = 0;\n b->meta.pos = 0;\n b->meta.closed = false;\n\n *ptr_iop_w = data.ptr;\n *ptr_io0_w = data.ptr;\n *ptr_io1_w = data.ptr;\n *ptr_io2_w = data.ptr + data.len;\n\n return b;\n}\n\n " +
+ "// --------\n\nstatic inline uint64_t //\nwuffs_base__io_writer__copy_from_slice(uint8_t** ptr_iop_w,\n uint8_t* io2_w,\n wuffs_base__slice_u8 src) {\n uint8_t* iop_w = *ptr_iop_w;\n size_t n = src.len;\n if (n > ((size_t)(io2_w - iop_w))) {\n n = (size_t)(io2_w - iop_w);\n }\n if (n > 0) {\n memmove(iop_w, src.ptr, n);\n *ptr_iop_w += n;\n }\n return (uint64_t)(n);\n}\n\nstatic inline uint32_t //\nwuffs_base__io_writer__limited_copy_u32_from_history(uint8_t** ptr_iop_w,\n uint8_t* io1_w,\n uint8_t* io2_w,\n uint32_t length,\n uint32_t distance) {\n if (!distance) {\n return 0;\n }\n uint8_t* p = *ptr_iop_w;\n if ((size_t)(p - io1_w) < (size_t)(distance)) {\n return 0;\n }\n uint8_t* q = p - distance;\n size_t n = (size_t)(io2_w - " +
+    "p);\n  if ((size_t)(length) > n) {\n    length = (uint32_t)(n);\n  } else {\n    n = (size_t)(length);\n  }\n  // TODO: unrolling by 3 seems best for the std/deflate benchmarks, but that\n  // is mostly because 3 is the minimum length for the deflate format. This\n  // function implementation shouldn't overfit to that one format. Perhaps the\n  // limited_copy_u32_from_history Wuffs method should also take an unroll hint\n  // argument, and the cgen can look if that argument is the constant\n  // expression '3'.\n  //\n  // See also wuffs_base__io_writer__limited_copy_u32_from_history_fast below.\n  //\n  // Alternatively or additionally, have a sloppy_limited_copy_u32_from_history\n  // method that copies 8 bytes at a time, which can write more than length bytes?\n  for (; n >= 3; n -= 3) {\n    *p++ = *q++;\n    *p++ = *q++;\n    *p++ = *q++;\n  }\n  for (; n; n--) {\n    *p++ = *q++;\n  }\n  *ptr_iop_w = p;\n  return length;\n}\n\n// wuffs_base__io_writer__limited_copy_u32_from_history_fast is like the\n// wuffs_base__io_writer__limited_copy" +
+ "_u32_from_history function above, but has\n// stronger pre-conditions. The caller needs to prove that:\n// - distance > 0\n// - distance <= (*ptr_iop_w - io1_w)\n// - length <= (io2_w - *ptr_iop_w)\nstatic inline uint32_t //\nwuffs_base__io_writer__limited_copy_u32_from_history_fast(uint8_t** ptr_iop_w,\n uint8_t* io1_w,\n uint8_t* io2_w,\n uint32_t length,\n uint32_t distance) {\n uint8_t* p = *ptr_iop_w;\n uint8_t* q = p - distance;\n uint32_t n = length;\n for (; n >= 3; n -= 3) {\n *p++ = *q++;\n *p++ = *q++;\n *p++ = *q++;\n }\n for (; n; n--) {\n *p++ = *q++;\n }\n *ptr_iop_w = p;\n return length;\n}\n\nstatic inline uint32_t //\nwuffs_base__io_writer__limited_copy_u32_from_reader(uint8_t** ptr_iop_w,\n uint8_t* io2_w,\n " +
+ " uint32_t length,\n const uint8_t** ptr_iop_r,\n const uint8_t* io2_r) {\n uint8_t* iop_w = *ptr_iop_w;\n size_t n = length;\n if (n > ((size_t)(io2_w - iop_w))) {\n n = (size_t)(io2_w - iop_w);\n }\n const uint8_t* iop_r = *ptr_iop_r;\n if (n > ((size_t)(io2_r - iop_r))) {\n n = (size_t)(io2_r - iop_r);\n }\n if (n > 0) {\n memmove(iop_w, iop_r, n);\n *ptr_iop_w += n;\n *ptr_iop_r += n;\n }\n return (uint32_t)(n);\n}\n\nstatic inline uint32_t //\nwuffs_base__io_writer__limited_copy_u32_from_slice(uint8_t** ptr_iop_w,\n uint8_t* io2_w,\n uint32_t length,\n wuffs_base__slice_u8 src) {\n uint8_t* iop_w = *ptr_iop_w;\n size_t n = src.len;\n if (n > length) {\n n = length;\n }\n if (n > ((size_t)(io2_w - iop_w))) {\n" +
+ " n = (size_t)(io2_w - iop_w);\n }\n if (n > 0) {\n memmove(iop_w, src.ptr, n);\n *ptr_iop_w += n;\n }\n return (uint32_t)(n);\n}\n\nstatic inline wuffs_base__io_buffer* //\nwuffs_base__io_writer__set(wuffs_base__io_buffer* b,\n uint8_t** ptr_iop_w,\n uint8_t** ptr_io0_w,\n uint8_t** ptr_io1_w,\n uint8_t** ptr_io2_w,\n wuffs_base__slice_u8 data) {\n b->data = data;\n b->meta.wi = 0;\n b->meta.ri = 0;\n b->meta.pos = 0;\n b->meta.closed = false;\n\n *ptr_iop_w = data.ptr;\n *ptr_io0_w = data.ptr;\n *ptr_io1_w = data.ptr;\n *ptr_io2_w = data.ptr + data.len;\n\n return b;\n}\n\n " +
"" +
"// ---------------- I/O (Utility)\n\n#define wuffs_base__utility__empty_io_reader wuffs_base__empty_io_reader\n#define wuffs_base__utility__empty_io_writer wuffs_base__empty_io_writer\n" +
""
diff --git a/lang/builtin/builtin.go b/lang/builtin/builtin.go
index b8edef9..6dd1183 100644
--- a/lang/builtin/builtin.go
+++ b/lang/builtin/builtin.go
@@ -331,16 +331,16 @@
"io_writer.since(mark: u64) slice u8",
"io_writer.copy_from_slice!(s: slice u8) u64",
- "io_writer.copy_n32_from_history!(n: u32, distance: u32) u32",
- "io_writer.copy_n32_from_reader!(n: u32, r: io_reader) u32",
- "io_writer.copy_n32_from_slice!(n: u32, s: slice u8) u32",
+ "io_writer.limited_copy_u32_from_history!(up_to: u32, distance: u32) u32",
+ "io_writer.limited_copy_u32_from_reader!(up_to: u32, r: io_reader) u32",
+ "io_writer.limited_copy_u32_from_slice!(up_to: u32, s: slice u8) u32",
// TODO: this should have explicit pre-conditions:
- // - n <= this.available()
+ // - up_to <= this.available()
// - distance > 0
// - distance <= this.since_mark().length()
// For now, that's all implicitly checked (i.e. hard coded).
- "io_writer.copy_n32_from_history_fast!(n: u32, distance: u32) u32",
+ "io_writer.limited_copy_u32_from_history_fast!(up_to: u32, distance: u32) u32",
// ---- token_writer
diff --git a/lang/check/bounds.go b/lang/check/bounds.go
index 370479f..80d9bd9 100644
--- a/lang/check/bounds.go
+++ b/lang/check/bounds.go
@@ -1019,8 +1019,8 @@
return bounds{}, err
}
- } else if method == t.IDCopyN32FromHistoryFast {
- if err := q.canCopyN32FromHistoryFast(recv, n.Args()); err != nil {
+ } else if method == t.IDLimitedCopyU32FromHistoryFast {
+ if err := q.canLimitedCopyU32FromHistoryFast(recv, n.Args()); err != nil {
return bounds{}, err
}
@@ -1100,7 +1100,7 @@
return fmt.Errorf("check: could not prove %s.can_undo_byte()", recv.Str(q.tm))
}
-func (q *checker) canCopyN32FromHistoryFast(recv *a.Expr, args []*a.Node) error {
+func (q *checker) canLimitedCopyU32FromHistoryFast(recv *a.Expr, args []*a.Node) error {
// As per cgen's io-private.h, there are three pre-conditions:
// - n <= this.available()
// - distance > 0
diff --git a/lang/token/list.go b/lang/token/list.go
index 421452e..28e9cde 100644
--- a/lang/token/list.go
+++ b/lang/token/list.go
@@ -517,11 +517,11 @@
IDSkip32Fast = ID(0x16C)
IDTake = ID(0x16D)
- IDCopyFromSlice = ID(0x170)
- IDCopyN32FromHistory = ID(0x171)
- IDCopyN32FromHistoryFast = ID(0x172)
- IDCopyN32FromReader = ID(0x173)
- IDCopyN32FromSlice = ID(0x174)
+ IDCopyFromSlice = ID(0x170)
+ IDLimitedCopyU32FromHistory = ID(0x171)
+ IDLimitedCopyU32FromHistoryFast = ID(0x172)
+ IDLimitedCopyU32FromReader = ID(0x173)
+ IDLimitedCopyU32FromSlice = ID(0x174)
// -------- 0x180 block.
@@ -878,11 +878,11 @@
IDSkip32Fast: "skip32_fast",
IDTake: "take",
- IDCopyFromSlice: "copy_from_slice",
- IDCopyN32FromHistory: "copy_n32_from_history",
- IDCopyN32FromHistoryFast: "copy_n32_from_history_fast",
- IDCopyN32FromReader: "copy_n32_from_reader",
- IDCopyN32FromSlice: "copy_n32_from_slice",
+ IDCopyFromSlice: "copy_from_slice",
+ IDLimitedCopyU32FromHistory: "limited_copy_u32_from_history",
+ IDLimitedCopyU32FromHistoryFast: "limited_copy_u32_from_history_fast",
+ IDLimitedCopyU32FromReader: "limited_copy_u32_from_reader",
+ IDLimitedCopyU32FromSlice: "limited_copy_u32_from_slice",
// -------- 0x180 block.
diff --git a/release/c/wuffs-unsupported-snapshot.c b/release/c/wuffs-unsupported-snapshot.c
index e310080..4d68520 100644
--- a/release/c/wuffs-unsupported-snapshot.c
+++ b/release/c/wuffs-unsupported-snapshot.c
@@ -7738,11 +7738,11 @@
}
static inline uint32_t //
-wuffs_base__io_writer__copy_n32_from_history(uint8_t** ptr_iop_w,
- uint8_t* io1_w,
- uint8_t* io2_w,
- uint32_t length,
- uint32_t distance) {
+wuffs_base__io_writer__limited_copy_u32_from_history(uint8_t** ptr_iop_w,
+ uint8_t* io1_w,
+ uint8_t* io2_w,
+ uint32_t length,
+ uint32_t distance) {
if (!distance) {
return 0;
}
@@ -7760,14 +7760,14 @@
// TODO: unrolling by 3 seems best for the std/deflate benchmarks, but that
// is mostly because 3 is the minimum length for the deflate format. This
// function implementation shouldn't overfit to that one format. Perhaps the
- // copy_n32_from_history Wuffs method should also take an unroll hint
+ // limited_copy_u32_from_history Wuffs method should also take an unroll hint
// argument, and the cgen can look if that argument is the constant
// expression '3'.
//
- // See also wuffs_base__io_writer__copy_n32_from_history_fast below.
+ // See also wuffs_base__io_writer__limited_copy_u32_from_history_fast below.
//
- // Alternatively, or additionally, have a sloppy_copy_n32_from_history method
- // that copies 8 bytes at a time, possibly writing more than length bytes?
+ // Alternatively or additionally, have a sloppy_limited_copy_u32_from_history
+  // method that copies 8 bytes at a time, which can write more than length bytes?
for (; n >= 3; n -= 3) {
*p++ = *q++;
*p++ = *q++;
@@ -7780,18 +7780,18 @@
return length;
}
-// wuffs_base__io_writer__copy_n32_from_history_fast is like the
-// wuffs_base__io_writer__copy_n32_from_history function above, but has
+// wuffs_base__io_writer__limited_copy_u32_from_history_fast is like the
+// wuffs_base__io_writer__limited_copy_u32_from_history function above, but has
// stronger pre-conditions. The caller needs to prove that:
// - distance > 0
// - distance <= (*ptr_iop_w - io1_w)
// - length <= (io2_w - *ptr_iop_w)
static inline uint32_t //
-wuffs_base__io_writer__copy_n32_from_history_fast(uint8_t** ptr_iop_w,
- uint8_t* io1_w,
- uint8_t* io2_w,
- uint32_t length,
- uint32_t distance) {
+wuffs_base__io_writer__limited_copy_u32_from_history_fast(uint8_t** ptr_iop_w,
+ uint8_t* io1_w,
+ uint8_t* io2_w,
+ uint32_t length,
+ uint32_t distance) {
uint8_t* p = *ptr_iop_w;
uint8_t* q = p - distance;
uint32_t n = length;
@@ -7808,11 +7808,11 @@
}
static inline uint32_t //
-wuffs_base__io_writer__copy_n32_from_reader(uint8_t** ptr_iop_w,
- uint8_t* io2_w,
- uint32_t length,
- const uint8_t** ptr_iop_r,
- const uint8_t* io2_r) {
+wuffs_base__io_writer__limited_copy_u32_from_reader(uint8_t** ptr_iop_w,
+ uint8_t* io2_w,
+ uint32_t length,
+ const uint8_t** ptr_iop_r,
+ const uint8_t* io2_r) {
uint8_t* iop_w = *ptr_iop_w;
size_t n = length;
if (n > ((size_t)(io2_w - iop_w))) {
@@ -7831,10 +7831,10 @@
}
static inline uint32_t //
-wuffs_base__io_writer__copy_n32_from_slice(uint8_t** ptr_iop_w,
- uint8_t* io2_w,
- uint32_t length,
- wuffs_base__slice_u8 src) {
+wuffs_base__io_writer__limited_copy_u32_from_slice(uint8_t** ptr_iop_w,
+ uint8_t* io2_w,
+ uint32_t length,
+ wuffs_base__slice_u8 src) {
uint8_t* iop_w = *ptr_iop_w;
size_t n = src.len;
if (n > length) {
@@ -15079,7 +15079,7 @@
}
v_length = ((v_length)&0xFFFF);
while (true) {
- v_n_copied = wuffs_base__io_writer__copy_n32_from_reader(
+ v_n_copied = wuffs_base__io_writer__limited_copy_u32_from_reader(
&iop_a_dst, io2_a_dst, v_length, &iop_a_src, io2_a_src);
if (v_length <= v_n_copied) {
status = wuffs_base__make_status(NULL);
@@ -15948,7 +15948,7 @@
goto exit;
}
v_hdist = (self->private_impl.f_history_index - v_hdist);
- wuffs_base__io_writer__copy_n32_from_slice(
+ wuffs_base__io_writer__limited_copy_u32_from_slice(
&iop_a_dst, io2_a_dst, v_hlen,
wuffs_base__slice_u8__subslice_i(
wuffs_base__make_slice_u8(self->private_data.f_history, 33025),
@@ -15963,7 +15963,7 @@
goto exit;
}
}
- wuffs_base__io_writer__copy_n32_from_history_fast(
+ wuffs_base__io_writer__limited_copy_u32_from_history_fast(
&iop_a_dst, io0_a_dst, io2_a_dst, v_length, (v_dist_minus_1 + 1));
goto label__0__break;
}
@@ -16314,7 +16314,7 @@
}
v_hdist = (self->private_impl.f_history_index - v_hdist);
while (true) {
- v_n_copied = wuffs_base__io_writer__copy_n32_from_slice(
+ v_n_copied = wuffs_base__io_writer__limited_copy_u32_from_slice(
&iop_a_dst, io2_a_dst, v_hlen,
wuffs_base__slice_u8__subslice_ij(
wuffs_base__make_slice_u8(self->private_data.f_history,
@@ -16338,7 +16338,7 @@
label__4__break:;
if (v_hlen > 0) {
while (true) {
- v_n_copied = wuffs_base__io_writer__copy_n32_from_slice(
+ v_n_copied = wuffs_base__io_writer__limited_copy_u32_from_slice(
&iop_a_dst, io2_a_dst, v_hlen,
wuffs_base__slice_u8__subslice_ij(
wuffs_base__make_slice_u8(self->private_data.f_history,
@@ -16360,7 +16360,7 @@
goto label__loop__continue;
}
}
- v_n_copied = wuffs_base__io_writer__copy_n32_from_history(
+ v_n_copied = wuffs_base__io_writer__limited_copy_u32_from_history(
&iop_a_dst, io0_a_dst, io2_a_dst, v_length, (v_dist_minus_1 + 1));
if (v_length <= v_n_copied) {
v_length = 0;
diff --git a/std/deflate/decode_deflate.wuffs b/std/deflate/decode_deflate.wuffs
index 20c5e5c..398fa06 100644
--- a/std/deflate/decode_deflate.wuffs
+++ b/std/deflate/decode_deflate.wuffs
@@ -294,7 +294,7 @@
}
length = length.low_bits(n: 16)
while true {
- n_copied = args.dst.copy_n32_from_reader!(n: length, r: args.src)
+ n_copied = args.dst.limited_copy_u32_from_reader!(up_to: length, r: args.src)
if length <= n_copied {
return ok
}
diff --git a/std/deflate/decode_huffman_fast.wuffs b/std/deflate/decode_huffman_fast.wuffs
index 61ce260..53c046a 100644
--- a/std/deflate/decode_huffman_fast.wuffs
+++ b/std/deflate/decode_huffman_fast.wuffs
@@ -317,8 +317,8 @@
// This copying is simpler than the decode_huffman_slow version
// because it cannot yield. We have already checked that
// args.dst.available() is large enough.
- args.dst.copy_n32_from_slice!(
- n: hlen, s: this.history[hdist & 0x7FFF ..])
+ args.dst.limited_copy_u32_from_slice!(
+ up_to: hlen, s: this.history[hdist & 0x7FFF ..])
if length == 0 {
// No need to copy from args.dst.
@@ -339,7 +339,8 @@
assert (length as base.u64) <= args.dst.available() via "a <= b: a <= c; c <= b"(c: 258)
// Copy from args.dst.
- args.dst.copy_n32_from_history_fast!(n: length, distance: (dist_minus_1 + 1))
+ args.dst.limited_copy_u32_from_history_fast!(
+ up_to: length, distance: (dist_minus_1 + 1))
break
} endwhile
} endwhile.loop
diff --git a/std/deflate/decode_huffman_slow.wuffs b/std/deflate/decode_huffman_slow.wuffs
index 474148e..044aa35 100644
--- a/std/deflate/decode_huffman_slow.wuffs
+++ b/std/deflate/decode_huffman_slow.wuffs
@@ -226,8 +226,8 @@
// Copy from hdist to the end of this.history.
while true {
- n_copied = args.dst.copy_n32_from_slice!(
- n: hlen, s: this.history[hdist & 0x7FFF .. 0x8000])
+ n_copied = args.dst.limited_copy_u32_from_slice!(
+ up_to: hlen, s: this.history[hdist & 0x7FFF .. 0x8000])
if hlen <= n_copied {
hlen = 0
break
@@ -245,8 +245,8 @@
// Copy from the start of this.history, if we wrapped around.
if hlen > 0 {
while true {
- n_copied = args.dst.copy_n32_from_slice!(
- n: hlen, s: this.history[hdist & 0x7FFF .. 0x8000])
+ n_copied = args.dst.limited_copy_u32_from_slice!(
+ up_to: hlen, s: this.history[hdist & 0x7FFF .. 0x8000])
if hlen <= n_copied {
hlen = 0
break
@@ -264,7 +264,8 @@
}
// Copy from args.dst.
- n_copied = args.dst.copy_n32_from_history!(n: length, distance: (dist_minus_1 + 1))
+ n_copied = args.dst.limited_copy_u32_from_history!(
+ up_to: length, distance: (dist_minus_1 + 1))
if length <= n_copied {
length = 0
break