const Reader = @This();

const builtin = @import("builtin");
const native_endian = builtin.target.cpu.arch.endian();

const std = @import("../std.zig");
const Writer = std.Io.Writer;
const Limit = std.Io.Limit;
const assert = std.debug.assert;
const testing = std.testing;
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayList;

pub const Limited = @import("Reader/Limited.zig");

vtable: *const VTable,
buffer: []u8,
/// Number of bytes which have been consumed from `buffer`.
seek: usize,
/// In `buffer` before this are buffered bytes, after this is `undefined`.
end: usize,

pub const VTable = struct {
    /// Writes bytes from the internally tracked logical position to `w`.
    ///
    /// Returns the number of bytes written, which will be at minimum `0` and
    /// at most `limit`. The number returned, including zero, does not indicate
    /// end of stream.
    ///
    /// The reader's internal logical seek position moves forward in accordance
    /// with the number of bytes returned from this function.
    ///
    /// Implementations are encouraged to utilize mandatory minimum buffer
    /// sizes combined with short reads (returning a value less than `limit`)
    /// in order to minimize complexity.
    ///
    /// Although this function is usually called when `buffer` is empty, it is
    /// also called when it needs to be filled more due to the API user
    /// requesting contiguous memory. In either case, the existing buffer data
    /// should be ignored; new data should be written to `w`.
    ///
    /// In addition to, or instead of writing to `w`, the implementation may
    /// choose to store data in `buffer`, modifying `seek` and `end`
    /// accordingly. Implementations are encouraged to take advantage of
    /// this if it simplifies the logic.
    stream: *const fn (r: *Reader, w: *Writer, limit: Limit) StreamError!usize,

    /// Consumes bytes from the internally tracked stream position without
    /// providing access to them.
    ///
    /// Returns the number of bytes discarded, which will be at minimum `0` and
    /// at most `limit`. The number of bytes returned, including zero, does not
    /// indicate end of stream.
    ///
    /// The reader's internal logical seek position moves forward in accordance
    /// with the number of bytes returned from this function.
    ///
    /// Implementations are encouraged to utilize mandatory minimum buffer
    /// sizes combined with short reads (returning a value less than `limit`)
    /// in order to minimize complexity.
    ///
    /// The default implementation is based on calling `stream`, borrowing
    /// `buffer` to construct a temporary `Writer` and ignoring the written
    /// data.
    ///
    /// This function is only called when `buffer` is empty.
    discard: *const fn (r: *Reader, limit: Limit) Error!usize = defaultDiscard,

    /// Returns number of bytes written to `data`.
    ///
    /// `data` must have nonzero length. `data[0]` may have zero length, in
    /// which case the implementation must write to `Reader.buffer`.
    ///
    /// `data` may not contain an alias to `Reader.buffer`.
    ///
    /// `data` is mutable because the implementation may temporarily modify the
    /// fields in order to handle partial reads. Implementations must restore
    /// the original value before returning.
    ///
    /// Implementations may ignore `data`, writing directly to `Reader.buffer`,
    /// modifying `seek` and `end` accordingly, and returning 0 from this
    /// function. Implementations are encouraged to take advantage of this if
    /// it simplifies the logic.
    ///
    /// The default implementation calls `stream` with either `data[0]` or
    /// `Reader.buffer`, whichever is bigger.
    readVec: *const fn (r: *Reader, data: [][]u8) Error!usize = defaultReadVec,

    /// Ensures `capacity` data can be buffered without rebasing.
    ///
    /// Asserts `capacity` is within buffer capacity, or that the stream ends
    /// within `capacity` bytes.
    ///
    /// Only called when `capacity` cannot be satisfied by unused capacity of
    /// `buffer`.
    ///
    /// The default implementation moves buffered data to the start of
    /// `buffer`, setting `seek` to zero, and cannot fail.
    rebase: *const fn (r: *Reader, capacity: usize) RebaseError!void = defaultRebase,
};

pub const StreamError = error{
    /// See the `Reader` implementation for detailed diagnostics.
    ReadFailed,
    /// See the `Writer` implementation for detailed diagnostics.
    WriteFailed,
    /// End of stream indicated from the `Reader`. This error cannot originate
    /// from the `Writer`.
    EndOfStream,
};

pub const Error = error{
    /// See the `Reader` implementation for detailed diagnostics.
    ReadFailed,
    EndOfStream,
};

pub const StreamRemainingError = error{
    /// See the `Reader` implementation for detailed diagnostics.
    ReadFailed,
    /// See the `Writer` implementation for detailed diagnostics.
    WriteFailed,
};

pub const ShortError = error{
    /// See the `Reader` implementation for detailed diagnostics.
    ReadFailed,
};

pub const RebaseError = error{
    EndOfStream,
};

pub const failing: Reader = .{
    .vtable = &.{
        .stream = failingStream,
        .discard = failingDiscard,
    },
    .buffer = &.{},
    .seek = 0,
    .end = 0,
};

/// This is generally safe to `@constCast` because it has an empty buffer, so
/// there is not really a way to accidentally attempt mutation of these fields.
pub const ending_instance: Reader = .fixed(&.{});
pub const ending: *Reader = @constCast(&ending_instance);

pub fn limited(r: *Reader, limit: Limit, buffer: []u8) Limited {
    return .init(r, limit, buffer);
}

/// Constructs a `Reader` such that it will read from `buffer` and then end.
pub fn fixed(buffer: []const u8) Reader {
    return .{
        .vtable = &.{
            .stream = endingStream,
            .discard = endingDiscard,
            .readVec = endingReadVec,
            .rebase = endingRebase,
        },
        // This cast is safe because all potential writes to it will instead
        // return `error.EndOfStream`.
        .buffer = @constCast(buffer),
        .end = buffer.len,
        .seek = 0,
    };
}

pub fn stream(r: *Reader, w: *Writer, limit: Limit) StreamError!usize {
    const buffer = limit.slice(r.buffer[r.seek..r.end]);
    if (buffer.len > 0) {
        @branchHint(.likely);
        const n = try w.write(buffer);
        r.seek += n;
        return n;
    }
    const n = try r.vtable.stream(r, w, limit);
    assert(n <= @intFromEnum(limit));
    return n;
}

pub fn discard(r: *Reader, limit: Limit) Error!usize {
    const buffered_len = r.end - r.seek;
    const remaining: Limit = if (limit.toInt()) |n| l: {
        if (buffered_len >= n) {
            r.seek += n;
            return n;
        }
        break :l .limited(n - buffered_len);
    } else .unlimited;
    r.seek = r.end;
    const n = try r.vtable.discard(r, remaining);
    assert(n <= @intFromEnum(remaining));
    return buffered_len + n;
}

pub fn defaultDiscard(r: *Reader, limit: Limit) Error!usize {
    assert(r.seek == r.end);
    r.seek = 0;
    r.end = 0;
    var d: Writer.Discarding = .init(r.buffer);
    var n = r.stream(&d.writer, limit) catch |err| switch (err) {
        error.WriteFailed => unreachable,
        error.ReadFailed => return error.ReadFailed,
        error.EndOfStream => return error.EndOfStream,
    };
    // If `stream` wrote to `r.buffer` without going through the writer,
    // we need to discard as much of the buffered data as possible.
    const remaining = @intFromEnum(limit) - n;
    const buffered_n_to_discard = @min(remaining, r.end - r.seek);
    n += buffered_n_to_discard;
    r.seek += buffered_n_to_discard;
    assert(n <= @intFromEnum(limit));
    return n;
}

/// "Pump" exactly `n` bytes from the reader to the writer.
pub fn streamExact(r: *Reader, w: *Writer, n: usize) StreamError!void {
    var remaining = n;
    while (remaining != 0) remaining -= try r.stream(w, .limited(remaining));
}
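
// Usage sketch (editorial addition, not part of the upstream test suite):
// `streamExact` pumps exactly `n` bytes and reports `error.EndOfStream` when
// the stream ends first. Values here are illustrative.
test streamExact {
    var out_buffer: [8]u8 = undefined;
    var r: Reader = .fixed("abcdef");
    var w: Writer = .fixed(&out_buffer);
    try r.streamExact(&w, 4);
    try testing.expectEqualStrings("abcd", w.buffered());
    try testing.expectError(error.EndOfStream, r.streamExact(&w, 3));
}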

/// "Pump" exactly `n` bytes from the reader to the writer.
pub fn streamExact64(r: *Reader, w: *Writer, n: u64) StreamError!void {
    var remaining = n;
    while (remaining != 0) remaining -= try r.stream(w, .limited64(remaining));
}

/// "Pump" exactly `n` bytes from the reader to the writer.
///
/// When draining `w`, ensures that at least `preserve_len` bytes remain
/// buffered.
///
/// Asserts `Writer.buffer` capacity exceeds `preserve_len`.
pub fn streamExactPreserve(r: *Reader, w: *Writer, preserve_len: usize, n: usize) StreamError!void {
    if (w.end + n <= w.buffer.len) {
        @branchHint(.likely);
        return streamExact(r, w, n);
    }
    // If `n` is large, we can ignore `preserve_len` up to a point.
    var remaining = n;
    while (remaining > preserve_len) {
        assert(remaining != 0);
        remaining -= try r.stream(w, .limited(remaining - preserve_len));
        if (w.end + remaining <= w.buffer.len) return streamExact(r, w, remaining);
    }
    // All the next bytes received must be preserved.
    if (preserve_len < w.end) {
        @memmove(w.buffer[0..preserve_len], w.buffer[w.end - preserve_len ..][0..preserve_len]);
        w.end = preserve_len;
    }
    return streamExact(r, w, remaining);
}

/// "Pump" data from the reader to the writer, handling `error.EndOfStream` as
/// a success case.
///
/// Returns total number of bytes written to `w`.
pub fn streamRemaining(r: *Reader, w: *Writer) StreamRemainingError!usize {
    var offset: usize = 0;
    while (true) {
        offset += r.stream(w, .unlimited) catch |err| switch (err) {
            error.EndOfStream => return offset,
            else => |e| return e,
        };
    }
}
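
// Usage sketch (editorial addition): `streamRemaining` drains the reader into
// `w`, treating end of stream as success and returning the byte count.
test streamRemaining {
    var out_buffer: [10]u8 = undefined;
    var r: Reader = .fixed("foobar");
    var w: Writer = .fixed(&out_buffer);
    try testing.expectEqual(6, try r.streamRemaining(&w));
    try testing.expectEqual(0, try r.streamRemaining(&w));
    try testing.expectEqualStrings("foobar", w.buffered());
}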

/// Consumes the stream until the end, ignoring all the data, returning the
/// number of bytes discarded.
pub fn discardRemaining(r: *Reader) ShortError!usize {
    var offset: usize = r.end - r.seek;
    r.seek = r.end;
    while (true) {
        offset += r.vtable.discard(r, .unlimited) catch |err| switch (err) {
            error.EndOfStream => return offset,
            else => |e| return e,
        };
    }
}

pub const LimitedAllocError = Allocator.Error || ShortError || error{StreamTooLong};

/// Transfers all bytes from the current position to the end of the stream, up
/// to `limit`, returning them as a caller-owned allocated slice.
///
/// If `limit` would be exceeded, `error.StreamTooLong` is returned instead. In
/// such case, the next byte that would be read will be the first one to exceed
/// `limit`, and all preceding bytes have been discarded.
 291///
 292/// See also:
 293/// * `appendRemaining`
 294pub fn allocRemaining(r: *Reader, gpa: Allocator, limit: Limit) LimitedAllocError![]u8 {
 295    var buffer: ArrayList(u8) = .empty;
 296    defer buffer.deinit(gpa);
 297    try appendRemaining(r, gpa, &buffer, limit);
 298    return buffer.toOwnedSlice(gpa);
 299}
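
// Usage sketch (editorial addition): `allocRemaining` returns a caller-owned
// slice; a finite `limit` turns an over-long stream into `error.StreamTooLong`.
test allocRemaining {
    var r: Reader = .fixed("hello");
    const bytes = try r.allocRemaining(testing.allocator, .unlimited);
    defer testing.allocator.free(bytes);
    try testing.expectEqualStrings("hello", bytes);

    var r2: Reader = .fixed("hello");
    try testing.expectError(error.StreamTooLong, r2.allocRemaining(testing.allocator, .limited(3)));
}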

pub fn allocRemainingAlignedSentinel(
    r: *Reader,
    gpa: Allocator,
    limit: Limit,
    comptime alignment: std.mem.Alignment,
    comptime sentinel: ?u8,
) LimitedAllocError!(if (sentinel) |s| [:s]align(alignment.toByteUnits()) u8 else []align(alignment.toByteUnits()) u8) {
    var buffer: std.array_list.Aligned(u8, alignment) = .empty;
    defer buffer.deinit(gpa);
    try appendRemainingAligned(r, gpa, alignment, &buffer, limit);
    if (sentinel) |s| {
        return buffer.toOwnedSliceSentinel(gpa, s);
    } else {
        return buffer.toOwnedSlice(gpa);
    }
}

/// Transfers all bytes from the current position to the end of the stream, up
/// to `limit`, appending them to `list`.
///
/// If `limit` is reached or exceeded, `error.StreamTooLong` is returned
/// instead. In such case, the next byte that would be read will be the first
/// one to exceed `limit`, and all preceding bytes have been appended to
/// `list`.
///
/// See also:
/// * `allocRemaining`
pub fn appendRemaining(
    r: *Reader,
    gpa: Allocator,
    list: *ArrayList(u8),
    limit: Limit,
) LimitedAllocError!void {
    return appendRemainingAligned(r, gpa, .of(u8), list, limit);
}
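
// Usage sketch (editorial addition): `appendRemaining` appends the rest of the
// stream to an existing list, reusing its capacity.
test appendRemaining {
    var list: ArrayList(u8) = .empty;
    defer list.deinit(testing.allocator);
    var r: Reader = .fixed("hello world");
    try r.appendRemaining(testing.allocator, &list, .unlimited);
    try testing.expectEqualStrings("hello world", list.items);
}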

/// Transfers all bytes from the current position to the end of the stream, up
/// to `limit`, appending them to `list`.
///
/// If `limit` is reached or exceeded, `error.StreamTooLong` is returned
/// instead. In such case, the next byte that would be read will be the first
/// one to exceed `limit`, and all preceding bytes have been appended to
/// `list`.
///
/// See also:
/// * `appendRemaining`
/// * `allocRemainingAligned`
pub fn appendRemainingAligned(
    r: *Reader,
    gpa: Allocator,
    comptime alignment: std.mem.Alignment,
    list: *std.array_list.Aligned(u8, alignment),
    limit: Limit,
) LimitedAllocError!void {
    var a = std.Io.Writer.Allocating.fromArrayListAligned(gpa, alignment, list);
    defer list.* = a.toArrayListAligned(alignment);

    var remaining = limit;
    while (remaining.nonzero()) {
        const n = stream(r, &a.writer, remaining) catch |err| switch (err) {
            error.EndOfStream => return,
            error.WriteFailed => return error.OutOfMemory,
            error.ReadFailed => return error.ReadFailed,
        };
        remaining = remaining.subtract(n).?;
    }
    return error.StreamTooLong;
}

pub const UnlimitedAllocError = Allocator.Error || ShortError;

pub fn appendRemainingUnlimited(r: *Reader, gpa: Allocator, list: *ArrayList(u8)) UnlimitedAllocError!void {
    var a: std.Io.Writer.Allocating = .initOwnedSlice(gpa, list.allocatedSlice());
    a.writer.end = list.items.len;
    list.* = .empty;
    defer {
        list.* = .{
            .items = a.writer.buffer[0..a.writer.end],
            .capacity = a.writer.buffer.len,
        };
    }
    _ = streamRemaining(r, &a.writer) catch |err| switch (err) {
        error.WriteFailed => return error.OutOfMemory,
        error.ReadFailed => return error.ReadFailed,
    };
}

/// Writes bytes from the internally tracked stream position to `data`.
///
/// Returns the number of bytes written, which will be at minimum `0` and
/// at most the sum of each data slice length. The number of bytes read,
/// including zero, does not indicate end of stream.
///
/// The reader's internal logical seek position moves forward in accordance
/// with the number of bytes returned from this function.
pub fn readVec(r: *Reader, data: [][]u8) Error!usize {
    var seek = r.seek;
    for (data, 0..) |buf, i| {
        const contents = r.buffer[seek..r.end];
        const copy_len = @min(contents.len, buf.len);
        @memcpy(buf[0..copy_len], contents[0..copy_len]);
        seek += copy_len;
        if (buf.len - copy_len == 0) continue;

        // All of `buffer` has been copied to `data`.
        const n = seek - r.seek;
        r.seek = seek;
        data[i] = buf[copy_len..];
        defer data[i] = buf;
        return n + (r.vtable.readVec(r, data[i..]) catch |err| switch (err) {
            error.EndOfStream => if (n == 0) return error.EndOfStream else 0,
            error.ReadFailed => return error.ReadFailed,
        });
    }
    const n = seek - r.seek;
    r.seek = seek;
    return n;
}

/// Writes to `Reader.buffer` or `data`, whichever has larger capacity.
pub fn defaultReadVec(r: *Reader, data: [][]u8) Error!usize {
    const first = data[0];
    if (first.len >= r.buffer.len - r.end) {
        var writer: Writer = .{
            .buffer = first,
            .end = 0,
            .vtable = &.{ .drain = Writer.fixedDrain },
        };
        const limit: Limit = .limited(writer.buffer.len - writer.end);
        return r.vtable.stream(r, &writer, limit) catch |err| switch (err) {
            error.WriteFailed => unreachable,
            else => |e| return e,
        };
    }
    var writer: Writer = .{
        .buffer = r.buffer,
        .end = r.end,
        .vtable = &.{ .drain = Writer.fixedDrain },
    };
    const limit: Limit = .limited(writer.buffer.len - writer.end);
    const n = r.vtable.stream(r, &writer, limit) catch |err| switch (err) {
        error.WriteFailed => unreachable,
        else => |e| return e,
    };
    r.end += n;
    return 0;
}

pub fn buffered(r: *Reader) []u8 {
    return r.buffer[r.seek..r.end];
}

pub fn bufferedLen(r: *const Reader) usize {
    return r.end - r.seek;
}

pub fn hashed(r: *Reader, hasher: anytype, buffer: []u8) Hashed(@TypeOf(hasher)) {
    return .init(r, hasher, buffer);
}

pub fn readVecAll(r: *Reader, data: [][]u8) Error!void {
    var index: usize = 0;
    var truncate: usize = 0;
    while (index < data.len) {
        {
            const untruncated = data[index];
            data[index] = untruncated[truncate..];
            defer data[index] = untruncated;
            truncate += try r.readVec(data[index..]);
        }
        while (index < data.len and truncate >= data[index].len) {
            truncate -= data[index].len;
            index += 1;
        }
    }
}
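
// Usage sketch (editorial addition): `readVecAll` fills every slice in `data`
// completely, unlike `readVec` which may return after a partial transfer.
test readVecAll {
    var r: Reader = .fixed("abcdef");
    var first: [2]u8 = undefined;
    var second: [4]u8 = undefined;
    var data: [2][]u8 = .{ &first, &second };
    try r.readVecAll(&data);
    try testing.expectEqualStrings("ab", &first);
    try testing.expectEqualStrings("cdef", &second);
}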

/// Returns the next `n` bytes from the stream, filling the buffer as
/// necessary.
///
/// Invalidates previously returned values from `peek`.
///
/// Asserts that the `Reader` was initialized with a buffer capacity at
/// least as big as `n`.
///
/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream`
/// is returned instead.
///
/// See also:
/// * `toss`
pub fn peek(r: *Reader, n: usize) Error![]u8 {
    try r.fill(n);
    return r.buffer[r.seek..][0..n];
}

/// Returns all the next buffered bytes, after filling the buffer to ensure it
/// contains at least `n` bytes.
///
/// Invalidates previously returned values from `peek` and `peekGreedy`.
///
/// Asserts that the `Reader` was initialized with a buffer capacity at
/// least as big as `n`.
///
/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream`
/// is returned instead.
///
/// See also:
/// * `peek`
/// * `toss`
pub fn peekGreedy(r: *Reader, n: usize) Error![]u8 {
    try r.fill(n);
    return r.buffer[r.seek..r.end];
}

/// Skips the next `n` bytes from the stream, advancing the seek position. This
/// is typically and safely used after `peek`.
///
/// Asserts that the number of bytes buffered is at least as many as `n`.
///
/// The "tossed" memory remains alive until a "peek" operation occurs.
///
/// See also:
/// * `peek`
/// * `discard`
pub fn toss(r: *Reader, n: usize) void {
    r.seek += n;
    assert(r.seek <= r.end);
}

/// Equivalent to `toss(r.bufferedLen())`.
pub fn tossBuffered(r: *Reader) void {
    r.seek = r.end;
}

/// Equivalent to `peek` followed by `toss`.
///
/// The data returned is invalidated by the next call to `take`, `peek`,
/// `fill`, and functions with those prefixes.
pub fn take(r: *Reader, n: usize) Error![]u8 {
    const result = try r.peek(n);
    r.toss(n);
    return result;
}

/// Returns the next `n` bytes from the stream as an array, filling the buffer
/// as necessary and advancing the seek position `n` bytes.
///
/// Asserts that the `Reader` was initialized with a buffer capacity at
/// least as big as `n`.
///
/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream`
/// is returned instead.
///
/// See also:
/// * `take`
pub fn takeArray(r: *Reader, comptime n: usize) Error!*[n]u8 {
    return (try r.take(n))[0..n];
}

/// Returns the next `n` bytes from the stream as an array, filling the buffer
/// as necessary, without advancing the seek position.
///
/// Asserts that the `Reader` was initialized with a buffer capacity at
/// least as big as `n`.
///
/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream`
/// is returned instead.
///
/// See also:
/// * `peek`
/// * `takeArray`
pub fn peekArray(r: *Reader, comptime n: usize) Error!*[n]u8 {
    return (try r.peek(n))[0..n];
}

/// Skips the next `n` bytes from the stream, advancing the seek position.
///
/// Unlike `toss` which is infallible, in this function `n` can be any amount.
///
/// Returns `error.EndOfStream` if fewer than `n` bytes could be discarded.
///
/// See also:
/// * `toss`
/// * `discardRemaining`
/// * `discardShort`
/// * `discard`
pub fn discardAll(r: *Reader, n: usize) Error!void {
    if ((try r.discardShort(n)) != n) return error.EndOfStream;
}

pub fn discardAll64(r: *Reader, n: u64) Error!void {
    var remaining: u64 = n;
    while (remaining > 0) {
        const limited_remaining = std.math.cast(usize, remaining) orelse std.math.maxInt(usize);
        try discardAll(r, limited_remaining);
        remaining -= limited_remaining;
    }
}

/// Skips the next `n` bytes from the stream, advancing the seek position.
///
/// Unlike `toss` which is infallible, in this function `n` can be any amount.
///
/// Returns the number of bytes discarded, which is less than `n` if and only
/// if the stream reached the end.
///
/// See also:
/// * `discardAll`
/// * `discardRemaining`
/// * `discard`
pub fn discardShort(r: *Reader, n: usize) ShortError!usize {
    const proposed_seek = r.seek + n;
    if (proposed_seek <= r.end) {
        @branchHint(.likely);
        r.seek = proposed_seek;
        return n;
    }
    var remaining = n - (r.end - r.seek);
    r.seek = r.end;
    while (true) {
        const discard_len = r.vtable.discard(r, .limited(remaining)) catch |err| switch (err) {
            error.EndOfStream => return n - remaining,
            error.ReadFailed => return error.ReadFailed,
        };
        remaining -= discard_len;
        if (remaining == 0) return n;
    }
}
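
// Usage sketch (editorial addition): `discardShort` reports a short count at
// end of stream instead of an error.
test discardShort {
    var r: Reader = .fixed("foobar");
    try testing.expectEqual(3, try r.discardShort(3));
    try testing.expectEqual(3, try r.discardShort(5));
    try testing.expectEqual(0, try r.discardShort(1));
}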

/// Fill `buffer` with the next `buffer.len` bytes from the stream, advancing
/// the seek position.
///
/// Invalidates previously returned values from `peek`.
///
/// If the provided buffer cannot be filled completely, `error.EndOfStream` is
/// returned instead.
///
/// See also:
/// * `peek`
/// * `readSliceShort`
pub fn readSliceAll(r: *Reader, buffer: []u8) Error!void {
    const n = try readSliceShort(r, buffer);
    if (n != buffer.len) return error.EndOfStream;
}
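
// Usage sketch (editorial addition): `readSliceAll` either fills the whole
// destination slice or fails with `error.EndOfStream`.
test readSliceAll {
    var r: Reader = .fixed("abcdef");
    var buf: [4]u8 = undefined;
    try r.readSliceAll(&buf);
    try testing.expectEqualStrings("abcd", &buf);
    try testing.expectError(error.EndOfStream, r.readSliceAll(&buf));
}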

/// Fill `buffer` with the next `buffer.len` bytes from the stream, advancing
/// the seek position.
///
/// Invalidates previously returned values from `peek`.
///
/// Returns the number of bytes read, which is less than `buffer.len` if and
/// only if the stream reached the end.
///
/// See also:
/// * `readSliceAll`
pub fn readSliceShort(r: *Reader, buffer: []u8) ShortError!usize {
    const contents = r.buffer[r.seek..r.end];
    const copy_len = @min(buffer.len, contents.len);
    @memcpy(buffer[0..copy_len], contents[0..copy_len]);
    r.seek += copy_len;
    if (buffer.len - copy_len == 0) {
        @branchHint(.likely);
        return buffer.len;
    }
    var i: usize = copy_len;
    var data: [1][]u8 = undefined;
    while (true) {
        data[0] = buffer[i..];
        i += readVec(r, &data) catch |err| switch (err) {
            error.EndOfStream => return i,
            error.ReadFailed => return error.ReadFailed,
        };
        if (buffer.len - i == 0) return buffer.len;
    }
}
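
// Usage sketch (editorial addition): the short variant returns how many bytes
// were actually read; zero means the stream was already exhausted.
test readSliceShort {
    var r: Reader = .fixed("abcdef");
    var buf: [4]u8 = undefined;
    try testing.expectEqual(4, try r.readSliceShort(&buf));
    try testing.expectEqual(2, try r.readSliceShort(&buf));
    try testing.expectEqual(0, try r.readSliceShort(&buf));
}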

/// Fill `buffer` with the next `buffer.len` bytes from the stream, advancing
/// the seek position.
///
/// Invalidates previously returned values from `peek`.
///
/// If the provided buffer cannot be filled completely, `error.EndOfStream` is
/// returned instead.
///
/// This function is inline to avoid dead code when `endian` is comptime-known
/// and matches the host endianness.
 687///
 688/// See also:
 689/// * `readSliceAll`
 690/// * `readSliceEndianAlloc`
 691pub inline fn readSliceEndian(
 692    r: *Reader,
 693    comptime Elem: type,
 694    buffer: []Elem,
 695    endian: std.builtin.Endian,
 696) Error!void {
 697    try readSliceAll(r, @ptrCast(buffer));
 698    if (native_endian != endian) for (buffer) |*elem| std.mem.byteSwapAllFields(Elem, elem);
 699}
 700
 701pub const ReadAllocError = Error || Allocator.Error;
 702
/// This function is inline to avoid dead code when `endian` is comptime-known
/// and matches the host endianness.
pub inline fn readSliceEndianAlloc(
    r: *Reader,
    allocator: Allocator,
    comptime Elem: type,
    len: usize,
    endian: std.builtin.Endian,
) ReadAllocError![]Elem {
    const dest = try allocator.alloc(Elem, len);
    errdefer allocator.free(dest);
    try readSliceAll(r, @ptrCast(dest));
    if (native_endian != endian) for (dest) |*elem| std.mem.byteSwapAllFields(Elem, elem);
    return dest;
}

/// Shortcut for calling `readSliceAll` with a buffer provided by `allocator`.
pub fn readAlloc(r: *Reader, allocator: Allocator, len: usize) ReadAllocError![]u8 {
    const dest = try allocator.alloc(u8, len);
    errdefer allocator.free(dest);
    try readSliceAll(r, dest);
    return dest;
}

pub const DelimiterError = error{
    /// See the `Reader` implementation for detailed diagnostics.
    ReadFailed,
    /// For "inclusive" functions, stream ended before the delimiter was found.
    /// For "exclusive" functions, stream ended and there are no more bytes to
    /// return.
    EndOfStream,
    /// The delimiter was not found within a number of bytes matching the
    /// capacity of the `Reader`.
    StreamTooLong,
};

/// Returns a slice of the next bytes of buffered data from the stream until
/// `sentinel` is found, advancing the seek position past the sentinel.
///
/// Returned slice has a sentinel.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `peekSentinel`
/// * `takeDelimiterExclusive`
/// * `takeDelimiterInclusive`
pub fn takeSentinel(r: *Reader, comptime sentinel: u8) DelimiterError![:sentinel]u8 {
    const result = try r.peekSentinel(sentinel);
    r.toss(result.len + 1);
    return result;
}

/// Returns a slice of the next bytes of buffered data from the stream until
/// `sentinel` is found, without advancing the seek position.
///
/// Returned slice has a sentinel; end of stream does not count as a delimiter.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `takeSentinel`
/// * `peekDelimiterExclusive`
/// * `peekDelimiterInclusive`
pub fn peekSentinel(r: *Reader, comptime sentinel: u8) DelimiterError![:sentinel]u8 {
    const result = try r.peekDelimiterInclusive(sentinel);
    return result[0 .. result.len - 1 :sentinel];
}

/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, advancing the seek position past the delimiter.
///
/// Returned slice includes the delimiter as the last byte.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `takeSentinel`
/// * `takeDelimiterExclusive`
/// * `peekDelimiterInclusive`
pub fn takeDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
    const result = try r.peekDelimiterInclusive(delimiter);
    r.toss(result.len);
    return result;
}

/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, without advancing the seek position.
///
/// Returned slice includes the delimiter as the last byte.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `peekSentinel`
/// * `peekDelimiterExclusive`
/// * `takeDelimiterInclusive`
pub fn peekDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
    {
        const contents = r.buffer[0..r.end];
        const seek = r.seek;
        if (std.mem.findScalarPos(u8, contents, seek, delimiter)) |end| {
            @branchHint(.likely);
            return contents[seek .. end + 1];
        }
    }
    while (true) {
        const content_len = r.end - r.seek;
        if (r.buffer.len - content_len == 0) break;
        try fillMore(r);
        const seek = r.seek;
        const contents = r.buffer[0..r.end];
        if (std.mem.findScalarPos(u8, contents, seek + content_len, delimiter)) |end| {
            return contents[seek .. end + 1];
        }
    }
    // It might or might not be end of stream. There is no more buffer space
    // left to disambiguate. If `StreamTooLong` was added to `RebaseError` then
    // this logic could be replaced by removing the exit condition from the
    // above while loop. That error code would represent when `buffer` capacity
    // is too small for an operation, replacing the current use of asserts.
    var failing_writer = Writer.failing;
    while (r.vtable.stream(r, &failing_writer, .limited(1))) |n| {
        assert(n == 0);
    } else |err| switch (err) {
        error.WriteFailed => return error.StreamTooLong,
        error.ReadFailed => |e| return e,
        error.EndOfStream => |e| return e,
    }
}

/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, advancing the seek position up to (but not past)
/// the delimiter.
///
/// Returned slice excludes the delimiter. End-of-stream is treated equivalent
/// to a delimiter, unless it would result in a length 0 return value, in which
/// case `error.EndOfStream` is returned instead.
///
/// If the delimiter is not found within a number of bytes matching the
/// capacity of this `Reader`, `error.StreamTooLong` is returned. In
/// such case, the stream state is unmodified as if this function was never
/// called.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `takeDelimiter`
/// * `takeDelimiterInclusive`
/// * `peekDelimiterExclusive`
pub fn takeDelimiterExclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
    const result = try r.peekDelimiterExclusive(delimiter);
    r.toss(result.len);
    return result;
}

/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, advancing the seek position past the delimiter.
///
/// Returned slice excludes the delimiter. End-of-stream is treated equivalent
/// to a delimiter, unless it would result in a length 0 return value, in which
/// case `null` is returned instead.
///
/// If the delimiter is not found within a number of bytes matching the
/// capacity of this `Reader`, `error.StreamTooLong` is returned. In
/// such case, the stream state is unmodified as if this function was never
/// called.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `takeDelimiterInclusive`
/// * `takeDelimiterExclusive`
pub fn takeDelimiter(r: *Reader, delimiter: u8) error{ ReadFailed, StreamTooLong }!?[]u8 {
    const inclusive = r.peekDelimiterInclusive(delimiter) catch |err| switch (err) {
        error.EndOfStream => {
            const remaining = r.buffer[r.seek..r.end];
            if (remaining.len == 0) return null;
            r.toss(remaining.len);
            return remaining;
        },
        else => |e| return e,
    };
    r.toss(inclusive.len);
    return inclusive[0 .. inclusive.len - 1];
}

/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, without advancing the seek position.
///
/// Returned slice excludes the delimiter. End-of-stream is treated equivalent
/// to a delimiter, unless it would result in a length 0 return value, in which
/// case `error.EndOfStream` is returned instead.
///
/// If the delimiter is not found within a number of bytes matching the
/// capacity of this `Reader`, `error.StreamTooLong` is returned. In
/// such case, the stream state is unmodified as if this function was never
/// called.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `peekDelimiterInclusive`
/// * `takeDelimiterExclusive`
pub fn peekDelimiterExclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
    const result = r.peekDelimiterInclusive(delimiter) catch |err| switch (err) {
        error.EndOfStream => {
            const remaining = r.buffer[r.seek..r.end];
            if (remaining.len == 0) return error.EndOfStream;
            return remaining;
        },
        else => |e| return e,
    };
    return result[0 .. result.len - 1];
}

/// Appends to `w` contents by reading from the stream until `delimiter` is
/// found. Does not write the delimiter itself.
///
/// Does not discard the delimiter from the `Reader`.
///
/// Returns number of bytes streamed, which may be zero, or `error.EndOfStream`
/// if the delimiter was not found.
///
/// Asserts buffer capacity of at least one. This function performs better with
/// larger buffers.
///
/// See also:
/// * `streamDelimiterEnding`
/// * `streamDelimiterLimit`
pub fn streamDelimiter(r: *Reader, w: *Writer, delimiter: u8) StreamError!usize {
    const n = streamDelimiterLimit(r, w, delimiter, .unlimited) catch |err| switch (err) {
        error.StreamTooLong => unreachable, // unlimited is passed
        else => |e| return e,
    };
    if (r.seek == r.end) return error.EndOfStream;
    return n;
}

/// Appends to `w` contents by reading from the stream until `delimiter` is found.
/// Does not write the delimiter itself.
///
/// Returns number of bytes streamed, which may be zero. If the stream reaches
/// the end, the reader buffer will be empty when this function returns.
/// Otherwise, it will have at least one byte buffered, starting with the
/// delimiter.
///
/// Asserts buffer capacity of at least one. This function performs better with
/// larger buffers.
///
/// See also:
/// * `streamDelimiter`
/// * `streamDelimiterLimit`
pub fn streamDelimiterEnding(
    r: *Reader,
    w: *Writer,
    delimiter: u8,
) StreamRemainingError!usize {
    return streamDelimiterLimit(r, w, delimiter, .unlimited) catch |err| switch (err) {
        error.StreamTooLong => unreachable, // unlimited is passed
        else => |e| return e,
    };
}

pub const StreamDelimiterLimitError = error{
    ReadFailed,
    WriteFailed,
    /// The delimiter was not found within the limit.
    StreamTooLong,
};

/// Appends to `w` contents by reading from the stream until `delimiter` is found.
/// Does not write the delimiter itself.
///
/// Does not discard the delimiter from the `Reader`.
///
/// Returns number of bytes streamed, which may be zero. End of stream can be
/// detected by checking if the next byte in the stream is the delimiter.
///
/// Asserts buffer capacity of at least one. This function performs better with
/// larger buffers.
pub fn streamDelimiterLimit(
    r: *Reader,
    w: *Writer,
    delimiter: u8,
    limit: Limit,
) StreamDelimiterLimitError!usize {
    var remaining = @intFromEnum(limit);
    while (remaining != 0) {
        const available = Limit.limited(remaining).slice(r.peekGreedy(1) catch |err| switch (err) {
            error.ReadFailed => return error.ReadFailed,
            error.EndOfStream => return @intFromEnum(limit) - remaining,
        });
        if (std.mem.indexOfScalar(u8, available, delimiter)) |delimiter_index| {
            try w.writeAll(available[0..delimiter_index]);
            r.toss(delimiter_index);
            remaining -= delimiter_index;
            return @intFromEnum(limit) - remaining;
        }
        try w.writeAll(available);
        r.toss(available.len);
        remaining -= available.len;
    }
    return error.StreamTooLong;
}

/// Reads from the stream until specified byte is found, discarding all data,
/// including the delimiter.
///
/// Returns number of bytes discarded, or `error.EndOfStream` if the delimiter
/// is not found.
///
/// See also:
/// * `discardDelimiterExclusive`
/// * `discardDelimiterLimit`
pub fn discardDelimiterInclusive(r: *Reader, delimiter: u8) Error!usize {
    const n = discardDelimiterLimit(r, delimiter, .unlimited) catch |err| switch (err) {
        error.StreamTooLong => unreachable, // unlimited is passed
        else => |e| return e,
    };
    if (r.seek == r.end) return error.EndOfStream;
    assert(r.buffer[r.seek] == delimiter);
    toss(r, 1);
    return n + 1;
}

/// Reads from the stream until specified byte is found, discarding all data,
/// excluding the delimiter.
///
/// Returns the number of bytes discarded.
///
/// Succeeds if stream ends before delimiter found. End of stream can be
/// detected by checking if the delimiter is buffered.
///
/// See also:
/// * `discardDelimiterInclusive`
/// * `discardDelimiterLimit`
pub fn discardDelimiterExclusive(r: *Reader, delimiter: u8) ShortError!usize {
    return discardDelimiterLimit(r, delimiter, .unlimited) catch |err| switch (err) {
        error.StreamTooLong => unreachable, // unlimited is passed
        else => |e| return e,
    };
}

pub const DiscardDelimiterLimitError = error{
    ReadFailed,
    /// The delimiter was not found within the limit.
    StreamTooLong,
};

/// Reads from the stream until specified byte is found, discarding all data,
/// excluding the delimiter.
///
/// Returns the number of bytes discarded.
///
/// Succeeds if stream ends before delimiter found. End of stream can be
/// detected by checking if the delimiter is buffered.
pub fn discardDelimiterLimit(r: *Reader, delimiter: u8, limit: Limit) DiscardDelimiterLimitError!usize {
    var remaining = @intFromEnum(limit);
    while (remaining != 0) {
        const available = Limit.limited(remaining).slice(r.peekGreedy(1) catch |err| switch (err) {
            error.ReadFailed => return error.ReadFailed,
            error.EndOfStream => return @intFromEnum(limit) - remaining,
        });
        if (std.mem.indexOfScalar(u8, available, delimiter)) |delimiter_index| {
            r.toss(delimiter_index);
            remaining -= delimiter_index;
            return @intFromEnum(limit) - remaining;
        }
        r.toss(available.len);
        remaining -= available.len;
    }
    return error.StreamTooLong;
}

/// Fills the buffer such that it contains at least `n` bytes, without
/// advancing the seek position.
///
/// Returns `error.EndOfStream` if and only if there are fewer than `n` bytes
/// remaining.
///
/// If the end of stream is not encountered, asserts buffer capacity is at
/// least `n`.
pub fn fill(r: *Reader, n: usize) Error!void {
    if (r.seek + n <= r.end) {
        @branchHint(.likely);
        return;
    }
    return fillUnbuffered(r, n);
}
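
// Usage sketch (editorial addition): with a `fixed` reader the entire source
// is already buffered, so `fill` only fails once the request exceeds what the
// stream can ever provide.
test fill {
    var r: Reader = .fixed("abc");
    try r.fill(2);
    try testing.expectEqual(3, r.bufferedLen());
    try testing.expectError(error.EndOfStream, r.fill(4));
}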

/// This internal function is separated from `fill` to encourage optimizers to inline `fill`, hence
/// propagating its `@branchHint` to usage sites. If these functions are combined, `fill` is large
/// enough that LLVM is reluctant to inline it, forcing usages of APIs like `takeInt` to go through
/// an expensive runtime function call just to figure out that the data is, in fact, already in the
/// buffer.
///
/// Missing this optimization can result in wall-clock time for the most affected benchmarks
/// increasing by a factor of 5 or more.
fn fillUnbuffered(r: *Reader, n: usize) Error!void {
    try rebase(r, n);
    var bufs: [1][]u8 = .{""};
    while (r.end < r.seek + n) _ = try r.vtable.readVec(r, &bufs);
}

/// Without advancing the seek position, does exactly one underlying read, filling the buffer as
/// much as possible. This may result in zero bytes added to the buffer, which is not an end of
/// stream condition. End of stream is communicated via returning `error.EndOfStream`.
///
/// Asserts buffer capacity is at least 1.
pub fn fillMore(r: *Reader) Error!void {
    try rebase(r, r.end - r.seek + 1);
    var bufs: [1][]u8 = .{""};
    _ = try r.vtable.readVec(r, &bufs);
}

/// Returns the next byte from the stream or returns `error.EndOfStream`.
///
/// Does not advance the seek position.
///
/// Asserts the buffer capacity is nonzero.
pub fn peekByte(r: *Reader) Error!u8 {
    const buffer = r.buffer[0..r.end];
    const seek = r.seek;
    if (seek < buffer.len) {
        @branchHint(.likely);
        return buffer[seek];
    }
    try fill(r, 1);
    return r.buffer[r.seek];
}

/// Reads 1 byte from the stream or returns `error.EndOfStream`.
///
/// Asserts the buffer capacity is nonzero.
pub fn takeByte(r: *Reader) Error!u8 {
    const result = try peekByte(r);
    r.seek += 1;
    return result;
}

/// Same as `takeByte` except the returned byte is signed.
pub fn takeByteSigned(r: *Reader) Error!i8 {
    return @bitCast(try r.takeByte());
}
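
// Usage sketch (editorial addition): single-byte reads, including the signed
// variant's reinterpretation of 0xff as -1.
test takeByte {
    var r: Reader = .fixed("\x01\xff");
    try testing.expectEqual(1, try r.takeByte());
    try testing.expectEqual(-1, try r.takeByteSigned());
    try testing.expectError(error.EndOfStream, r.takeByte());
}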

/// Asserts the buffer was initialized with a capacity at least `@bitSizeOf(T) / 8`.
pub inline fn takeInt(r: *Reader, comptime T: type, endian: std.builtin.Endian) Error!T {
    const n = @divExact(@typeInfo(T).int.bits, 8);
    return std.mem.readInt(T, try r.takeArray(n), endian);
}
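
// Usage sketch (editorial addition): `takeInt` decodes fixed-width integers
// with an explicit byte order.
test takeInt {
    var r: Reader = .fixed("\x12\x34\x56\x78");
    try testing.expectEqual(0x1234, try r.takeInt(u16, .big));
    try testing.expectEqual(0x7856, try r.takeInt(u16, .little));
    try testing.expectError(error.EndOfStream, r.takeInt(u16, .big));
}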

/// Asserts the buffer was initialized with a capacity at least `@bitSizeOf(T) / 8`.
pub inline fn peekInt(r: *Reader, comptime T: type, endian: std.builtin.Endian) Error!T {
    const n = @divExact(@typeInfo(T).int.bits, 8);
    return std.mem.readInt(T, try r.peekArray(n), endian);
}

/// Asserts the buffer was initialized with a capacity at least `n`.
pub fn takeVarInt(r: *Reader, comptime Int: type, endian: std.builtin.Endian, n: usize) Error!Int {
    assert(n <= @sizeOf(Int));
    return std.mem.readVarInt(Int, try r.take(n), endian);
}
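
// Usage sketch (editorial addition): `takeVarInt` widens a 3-byte big-endian
// quantity into a `u32`.
test takeVarInt {
    var r: Reader = .fixed("\x01\x02\x03");
    try testing.expectEqual(0x010203, try r.takeVarInt(u32, .big, 3));
}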

/// Obtains an unaligned pointer to the beginning of the stream, reinterpreted
/// as a pointer to the provided type, advancing the seek position.
///
/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
///
/// See also:
/// * `peekStructPointer`
/// * `takeStruct`
pub fn takeStructPointer(r: *Reader, comptime T: type) Error!*align(1) T {
    // Only extern and packed structs have defined in-memory layout.
    comptime assert(@typeInfo(T).@"struct".layout != .auto);
    return @ptrCast(try r.takeArray(@sizeOf(T)));
}

/// Obtains an unaligned pointer to the beginning of the stream, reinterpreted
/// as a pointer to the provided type, without advancing the seek position.
///
/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
///
/// See also:
/// * `takeStructPointer`
/// * `peekStruct`
pub fn peekStructPointer(r: *Reader, comptime T: type) Error!*align(1) T {
    // Only extern and packed structs have defined in-memory layout.
    comptime assert(@typeInfo(T).@"struct".layout != .auto);
    return @ptrCast(try r.peekArray(@sizeOf(T)));
}

/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
///
/// This function is inline to avoid referencing `std.mem.byteSwapAllFields`
/// when `endian` is comptime-known and matches the host endianness.
///
/// See also:
/// * `takeStructPointer`
/// * `peekStruct`
pub inline fn takeStruct(r: *Reader, comptime T: type, endian: std.builtin.Endian) Error!T {
    switch (@typeInfo(T)) {
        .@"struct" => |info| switch (info.layout) {
            .auto => @compileError("ill-defined memory layout"),
            .@"extern" => {
                var res = (try r.takeStructPointer(T)).*;
                if (native_endian != endian) std.mem.byteSwapAllFields(T, &res);
                return res;
            },
            .@"packed" => {
                return @bitCast(try takeInt(r, info.backing_integer.?, endian));
            },
        },
        else => @compileError("not a struct"),
    }
}
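
// Usage sketch (editorial addition): decoding an extern struct from big-endian
// wire bytes; the `Header` layout here is hypothetical.
test takeStruct {
    const Header = extern struct { tag: u8, len: u8, checksum: u16 };
    var r: Reader = .fixed("\x01\x02\x03\x04");
    const h = try r.takeStruct(Header, .big);
    try testing.expectEqual(1, h.tag);
    try testing.expectEqual(2, h.len);
    try testing.expectEqual(0x0304, h.checksum);
    try testing.expectError(error.EndOfStream, r.takeStruct(Header, .big));
}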

/// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`.
///
/// This function is inline to avoid referencing `std.mem.byteSwapAllFields`
/// when `endian` is comptime-known and matches the host endianness.
///
/// See also:
/// * `takeStruct`
/// * `peekStructPointer`
pub inline fn peekStruct(r: *Reader, comptime T: type, endian: std.builtin.Endian) Error!T {
    switch (@typeInfo(T)) {
        .@"struct" => |info| switch (info.layout) {
            .auto => @compileError("ill-defined memory layout"),
            .@"extern" => {
                var res = (try r.peekStructPointer(T)).*;
                if (native_endian != endian) std.mem.byteSwapAllFields(T, &res);
                return res;
            },
            .@"packed" => {
                return @bitCast(try peekInt(r, info.backing_integer.?, endian));
            },
        },
        else => @compileError("not a struct"),
    }
}

pub const TakeEnumError = Error || error{InvalidEnumTag};

/// Reads an integer with the same size as the given enum's tag type. If the
/// integer matches an enum tag, casts the integer to the enum tag and returns
/// it. Otherwise, returns `error.InvalidEnumTag`.
///
/// Asserts the buffer was initialized with a capacity at least `@sizeOf(Enum)`.
pub fn takeEnum(r: *Reader, comptime Enum: type, endian: std.builtin.Endian) TakeEnumError!Enum {
    const Tag = @typeInfo(Enum).@"enum".tag_type;
    const int = try r.takeInt(Tag, endian);
    return std.meta.intToEnum(Enum, int);
}

/// Reads an integer with the same size as the given nonexhaustive enum's tag type.
///
/// Asserts the buffer was initialized with a capacity at least `@sizeOf(Enum)`.
pub fn takeEnumNonexhaustive(r: *Reader, comptime Enum: type, endian: std.builtin.Endian) Error!Enum {
    const info = @typeInfo(Enum).@"enum";
    comptime assert(!info.is_exhaustive);
    comptime assert(@bitSizeOf(info.tag_type) == @sizeOf(info.tag_type) * 8);
    return takeEnum(r, Enum, endian) catch |err| switch (err) {
        error.InvalidEnumTag => unreachable,
        else => |e| return e,
    };
}
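
// Usage sketch (editorial addition): a non-exhaustive enum accepts any tag
// value, so no `error.InvalidEnumTag` is possible.
test takeEnumNonexhaustive {
    const E = enum(u8) { a = 0, _ };
    var r: Reader = .fixed("\x07");
    try testing.expectEqual(@as(E, @enumFromInt(7)), try r.takeEnumNonexhaustive(E, .little));
}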

pub const TakeLeb128Error = Error || error{Overflow};

/// Read a single LEB128 value as type T, or `error.Overflow` if the value cannot fit.
pub fn takeLeb128(r: *Reader, comptime Result: type) TakeLeb128Error!Result {
    const result_info = @typeInfo(Result).int;
    return std.math.cast(Result, try r.takeMultipleOf7Leb128(@Int(
        result_info.signedness,
        std.mem.alignForwardAnyAlign(u16, result_info.bits, 7),
    ))) orelse error.Overflow;
}
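
// Usage sketch (editorial addition): 0xE5 0x8E 0x26 is the classic unsigned
// LEB128 encoding of 624485; the same bytes overflow a `u8` result.
test takeLeb128 {
    var r: Reader = .fixed("\xe5\x8e\x26");
    try testing.expectEqual(624485, try r.takeLeb128(u32));
    var r2: Reader = .fixed("\xe5\x8e\x26");
    try testing.expectError(error.Overflow, r2.takeLeb128(u8));
}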
1281
1282fn takeMultipleOf7Leb128(r: *Reader, comptime Result: type) TakeLeb128Error!Result {
1283    const result_info = @typeInfo(Result).int;
1284    comptime assert(result_info.bits % 7 == 0);
1285    var remaining_bits: std.math.Log2IntCeil(Result) = result_info.bits;
1286    const UnsignedResult = @Int(.unsigned, result_info.bits);
1287    var result: UnsignedResult = 0;
1288    var fits = true;
1289    while (true) {
1290        const buffer: []const packed struct(u8) { bits: u7, more: bool } = @ptrCast(try r.peekGreedy(1));
1291        for (buffer, 1..) |byte, len| {
1292            if (remaining_bits > 0) {
1293                result = @shlExact(@as(UnsignedResult, byte.bits), result_info.bits - 7) |
1294                    if (result_info.bits > 7) @shrExact(result, 7) else 0;
1295                remaining_bits -= 7;
1296            } else if (fits) fits = switch (result_info.signedness) {
1297                .signed => @as(i7, @bitCast(byte.bits)) ==
1298                    @as(i7, @truncate(@as(Result, @bitCast(result)) >> (result_info.bits - 1))),
1299                .unsigned => byte.bits == 0,
1300            };
1301            if (byte.more) continue;
1302            r.toss(len);
1303            return if (fits) @as(Result, @bitCast(result)) >> remaining_bits else error.Overflow;
1304        }
1305        r.toss(buffer.len);
1306    }
1307}
1308
1309/// Ensures `capacity` data can be buffered without rebasing.
1310pub fn rebase(r: *Reader, capacity: usize) RebaseError!void {
1311    if (r.buffer.len - r.seek >= capacity) {
1312        @branchHint(.likely);
1313        return;
1314    }
1315    return r.vtable.rebase(r, capacity);
1316}
1317
1318pub fn defaultRebase(r: *Reader, capacity: usize) RebaseError!void {
1319    assert(r.buffer.len - r.seek < capacity);
1320    const data = r.buffer[r.seek..r.end];
1321    @memmove(r.buffer[0..data.len], data);
1322    r.seek = 0;
1323    r.end = data.len;
1324    assert(r.buffer.len - r.seek >= capacity);
1325}
1326
1327test fixed {
1328    var r: Reader = .fixed("a\x02");
1329    try testing.expect((try r.takeByte()) == 'a');
1330    try testing.expect((try r.takeEnum(enum(u8) {
1331        a = 0,
1332        b = 99,
1333        c = 2,
1334        d = 3,
1335    }, builtin.cpu.arch.endian())) == .c);
1336    try testing.expectError(error.EndOfStream, r.takeByte());
1337}
1338
1339test peek {
1340    var r: Reader = .fixed("abc");
1341    try testing.expectEqualStrings("ab", try r.peek(2));
1342    try testing.expectEqualStrings("a", try r.peek(1));
1343}
1344
1345test peekGreedy {
1346    var r: Reader = .fixed("abc");
1347    try testing.expectEqualStrings("abc", try r.peekGreedy(1));
1348}
1349
1350test toss {
1351    var r: Reader = .fixed("abc");
1352    r.toss(1);
1353    try testing.expectEqualStrings("bc", r.buffered());
1354}
1355
1356test take {
1357    var r: Reader = .fixed("abc");
1358    try testing.expectEqualStrings("ab", try r.take(2));
1359    try testing.expectEqualStrings("c", try r.take(1));
1360}
1361
1362test takeArray {
1363    var r: Reader = .fixed("abc");
1364    try testing.expectEqualStrings("ab", try r.takeArray(2));
1365    try testing.expectEqualStrings("c", try r.takeArray(1));
1366}
1367
1368test peekArray {
1369    var r: Reader = .fixed("abc");
1370    try testing.expectEqualStrings("ab", try r.peekArray(2));
1371    try testing.expectEqualStrings("a", try r.peekArray(1));
1372}
1373
1374test discardAll {
1375    var r: Reader = .fixed("foobar");
1376    try r.discardAll(3);
1377    try testing.expectEqualStrings("bar", try r.take(3));
1378    try r.discardAll(0);
1379    try testing.expectError(error.EndOfStream, r.discardAll(1));
1380}
1381
1382test discardRemaining {
1383    var r: Reader = .fixed("foobar");
1384    r.toss(1);
1385    try testing.expectEqual(5, try r.discardRemaining());
1386    try testing.expectEqual(0, try r.discardRemaining());
1387}
1388
1389test stream {
1390    var out_buffer: [10]u8 = undefined;
1391    var r: Reader = .fixed("foobar");
1392    var w: Writer = .fixed(&out_buffer);
1393    // Short streams are possible with this function but not with fixed.
    try testing.expectEqual(2, try r.stream(&w, .limited(2)));
    try testing.expectEqualStrings("fo", w.buffered());
    try testing.expectEqual(4, try r.stream(&w, .unlimited));
    try testing.expectEqualStrings("foobar", w.buffered());
}

test takeSentinel {
    var r: Reader = .fixed("ab\nc");
    try testing.expectEqualStrings("ab", try r.takeSentinel('\n'));
    try testing.expectError(error.EndOfStream, r.takeSentinel('\n'));
    try testing.expectEqualStrings("c", try r.peek(1));
}

test peekSentinel {
    var r: Reader = .fixed("ab\nc");
    try testing.expectEqualStrings("ab", try r.peekSentinel('\n'));
    try testing.expectEqualStrings("ab", try r.peekSentinel('\n'));
    r.toss(3);
    try testing.expectError(error.EndOfStream, r.peekSentinel('\n'));
    try testing.expectEqualStrings("c", try r.peek(1));
}

test takeDelimiterInclusive {
    var r: Reader = .fixed("ab\nc");
    try testing.expectEqualStrings("ab\n", try r.takeDelimiterInclusive('\n'));
    try testing.expectError(error.EndOfStream, r.takeDelimiterInclusive('\n'));
}

test peekDelimiterInclusive {
    var r: Reader = .fixed("ab\nc");
    try testing.expectEqualStrings("ab\n", try r.peekDelimiterInclusive('\n'));
    try testing.expectEqualStrings("ab\n", try r.peekDelimiterInclusive('\n'));
    r.toss(3);
    try testing.expectError(error.EndOfStream, r.peekDelimiterInclusive('\n'));
    try testing.expectEqualStrings("c", try r.peek(1));
}

test takeDelimiterExclusive {
    var r: Reader = .fixed("ab\nc");

    try testing.expectEqualStrings("ab", try r.takeDelimiterExclusive('\n'));
    try testing.expectEqualStrings("", try r.takeDelimiterExclusive('\n'));
    try testing.expectEqualStrings("", try r.takeDelimiterExclusive('\n'));
    try testing.expectEqualStrings("\n", try r.take(1));

    try testing.expectEqualStrings("c", try r.takeDelimiterExclusive('\n'));
    try testing.expectError(error.EndOfStream, r.takeDelimiterExclusive('\n'));
}

test peekDelimiterExclusive {
    var r: Reader = .fixed("ab\nc");

    try testing.expectEqualStrings("ab", try r.peekDelimiterExclusive('\n'));
    try testing.expectEqualStrings("ab", try r.peekDelimiterExclusive('\n'));
    r.toss(2);
    try testing.expectEqualStrings("", try r.peekDelimiterExclusive('\n'));
    try testing.expectEqualStrings("\n", try r.take(1));

    try testing.expectEqualStrings("c", try r.peekDelimiterExclusive('\n'));
    try testing.expectEqualStrings("c", try r.peekDelimiterExclusive('\n'));
    r.toss(1);
    try testing.expectError(error.EndOfStream, r.peekDelimiterExclusive('\n'));
}

test takeDelimiter {
    var r: Reader = .fixed("ab\nc\n\nd");
    try testing.expectEqualStrings("ab", (try r.takeDelimiter('\n')).?);
    try testing.expectEqualStrings("c", (try r.takeDelimiter('\n')).?);
    try testing.expectEqualStrings("", (try r.takeDelimiter('\n')).?);
    try testing.expectEqualStrings("d", (try r.takeDelimiter('\n')).?);
    try testing.expectEqual(null, try r.takeDelimiter('\n'));
    try testing.expectEqual(null, try r.takeDelimiter('\n'));

    r = .fixed("ab\nc\n\nd\n"); // one trailing newline does not affect behavior
    try testing.expectEqualStrings("ab", (try r.takeDelimiter('\n')).?);
    try testing.expectEqualStrings("c", (try r.takeDelimiter('\n')).?);
    try testing.expectEqualStrings("", (try r.takeDelimiter('\n')).?);
    try testing.expectEqualStrings("d", (try r.takeDelimiter('\n')).?);
    try testing.expectEqual(null, try r.takeDelimiter('\n'));
    try testing.expectEqual(null, try r.takeDelimiter('\n'));
}

test streamDelimiter {
    var out_buffer: [10]u8 = undefined;
    var r: Reader = .fixed("foo\nbars");
    var w: Writer = .fixed(&out_buffer);
    try testing.expectEqual(3, try r.streamDelimiter(&w, '\n'));
    try testing.expectEqualStrings("foo", w.buffered());
    try testing.expectEqual(0, try r.streamDelimiter(&w, '\n'));
    r.toss(1);
    try testing.expectError(error.EndOfStream, r.streamDelimiter(&w, '\n'));
}

test streamDelimiterEnding {
    var out_buffer: [10]u8 = undefined;
    var r: Reader = .fixed("foo\nbars");
    var w: Writer = .fixed(&out_buffer);
    try testing.expectEqual(3, try r.streamDelimiterEnding(&w, '\n'));
    try testing.expectEqualStrings("foo", w.buffered());
    r.toss(1);
    try testing.expectEqual(4, try r.streamDelimiterEnding(&w, '\n'));
    try testing.expectEqualStrings("foobars", w.buffered());
    try testing.expectEqual(0, try r.streamDelimiterEnding(&w, '\n'));
    try testing.expectEqual(0, try r.streamDelimiterEnding(&w, '\n'));
}

test streamDelimiterLimit {
    var out_buffer: [10]u8 = undefined;
    var r: Reader = .fixed("foo\nbars");
    var w: Writer = .fixed(&out_buffer);
    try testing.expectError(error.StreamTooLong, r.streamDelimiterLimit(&w, '\n', .limited(2)));
    try testing.expectEqual(1, try r.streamDelimiterLimit(&w, '\n', .limited(3)));
    try testing.expectEqualStrings("\n", try r.take(1));
    try testing.expectEqual(4, try r.streamDelimiterLimit(&w, '\n', .unlimited));
    try testing.expectEqualStrings("foobars", w.buffered());
}

test discardDelimiterExclusive {
    var r: Reader = .fixed("foob\nar");
    try testing.expectEqual(4, try r.discardDelimiterExclusive('\n'));
    try testing.expectEqualStrings("\n", try r.take(1));
    try testing.expectEqual(2, try r.discardDelimiterExclusive('\n'));
    try testing.expectEqual(0, try r.discardDelimiterExclusive('\n'));
}

test discardDelimiterInclusive {
    var r: Reader = .fixed("foob\nar");
    try testing.expectEqual(5, try r.discardDelimiterInclusive('\n'));
    try testing.expectError(error.EndOfStream, r.discardDelimiterInclusive('\n'));
}

test discardDelimiterLimit {
    var r: Reader = .fixed("foob\nar");
    try testing.expectError(error.StreamTooLong, r.discardDelimiterLimit('\n', .limited(4)));
    try testing.expectEqual(0, try r.discardDelimiterLimit('\n', .limited(2)));
    try testing.expectEqualStrings("\n", try r.take(1));
    try testing.expectEqual(2, try r.discardDelimiterLimit('\n', .unlimited));
    try testing.expectEqual(0, try r.discardDelimiterLimit('\n', .unlimited));
}

test fill {
    var r: Reader = .fixed("abc");
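    // A fixed reader starts fully buffered; `fill` only guarantees that at
    // least `n` bytes are buffered, so both calls succeed without consuming.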
    try r.fill(1);
    try r.fill(3);
}

test takeByte {
    var r: Reader = .fixed("ab");
    try testing.expectEqual('a', try r.takeByte());
    try testing.expectEqual('b', try r.takeByte());
    try testing.expectError(error.EndOfStream, r.takeByte());
}

test takeByteSigned {
    var r: Reader = .fixed(&.{ 255, 5 });
    try testing.expectEqual(-1, try r.takeByteSigned());
    try testing.expectEqual(5, try r.takeByteSigned());
    try testing.expectError(error.EndOfStream, r.takeByteSigned());
}

test takeInt {
    var r: Reader = .fixed(&.{ 0x12, 0x34, 0x56 });
    try testing.expectEqual(0x1234, try r.takeInt(u16, .big));
    try testing.expectError(error.EndOfStream, r.takeInt(u16, .little));
}

test takeVarInt {
    var r: Reader = .fixed(&.{ 0x12, 0x34, 0x56 });
    try testing.expectEqual(0x123456, try r.takeVarInt(u64, .big, 3));
    try testing.expectError(error.EndOfStream, r.takeVarInt(u16, .little, 1));
}

test takeStructPointer {
    var r: Reader = .fixed(&.{ 0x12, 0x00, 0x34, 0x56 });
    const S = extern struct { a: u8, b: u16 };
    switch (native_endian) {
        .little => try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x5634 }), (try r.takeStructPointer(S)).*),
        .big => try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x3456 }), (try r.takeStructPointer(S)).*),
    }
    try testing.expectError(error.EndOfStream, r.takeStructPointer(S));
}

test peekStructPointer {
    var r: Reader = .fixed(&.{ 0x12, 0x00, 0x34, 0x56 });
    const S = extern struct { a: u8, b: u16 };
    switch (native_endian) {
        .little => {
            try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x5634 }), (try r.peekStructPointer(S)).*);
            try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x5634 }), (try r.peekStructPointer(S)).*);
        },
        .big => {
            try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x3456 }), (try r.peekStructPointer(S)).*);
            try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x3456 }), (try r.peekStructPointer(S)).*);
        },
    }
}

test takeStruct {
    var r: Reader = .fixed(&.{ 0x12, 0x00, 0x34, 0x56 });
    const S = extern struct { a: u8, b: u16 };
    try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x3456 }), try r.takeStruct(S, .big));
    try testing.expectError(error.EndOfStream, r.takeStruct(S, .little));
}

test peekStruct {
    var r: Reader = .fixed(&.{ 0x12, 0x00, 0x34, 0x56 });
    const S = extern struct { a: u8, b: u16 };
    try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x3456 }), try r.peekStruct(S, .big));
    try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x5634 }), try r.peekStruct(S, .little));
}

test takeEnum {
    var r: Reader = .fixed(&.{ 2, 0, 1 });
    const E1 = enum(u8) { a, b, c };
    const E2 = enum(u16) { _ };
    try testing.expectEqual(E1.c, try r.takeEnum(E1, .little));
    try testing.expectEqual(@as(E2, @enumFromInt(0x0001)), try r.takeEnum(E2, .big));
}

test takeLeb128 {
    var r: Reader = .fixed("\xc7\x9f\x7f\x80");
    try testing.expectEqual(-12345, try r.takeLeb128(i64));
    try testing.expectEqual(0x80, try r.peekByte());
    try testing.expectError(error.EndOfStream, r.takeLeb128(i64));
}

test readSliceShort {
    var r: Reader = .fixed("HelloFren");
    var buf: [5]u8 = undefined;
    try testing.expectEqual(5, try r.readSliceShort(&buf));
    try testing.expectEqualStrings("Hello", buf[0..5]);
    try testing.expectEqual(4, try r.readSliceShort(&buf));
    try testing.expectEqualStrings("Fren", buf[0..4]);
    try testing.expectEqual(0, try r.readSliceShort(&buf));
}

test "readSliceShort with smaller buffer than Reader" {
    var reader_buf: [15]u8 = undefined;
    const str = "This is a test";
    var one_byte_stream: testing.Reader = .init(&reader_buf, &.{
        .{ .buffer = str },
    });
    one_byte_stream.artificial_limit = .limited(1);

    var buf: [14]u8 = undefined;
    try testing.expectEqual(14, try one_byte_stream.interface.readSliceShort(&buf));
    try testing.expectEqualStrings(str, &buf);
}

test "readSliceShort with indirect reader" {
    var r: Reader = .fixed("HelloFren");
    var ri_buf: [3]u8 = undefined;
    var ri: std.testing.ReaderIndirect = .init(&r, &ri_buf);
    var buf: [5]u8 = undefined;
    try testing.expectEqual(5, try ri.interface.readSliceShort(&buf));
    try testing.expectEqualStrings("Hello", buf[0..5]);
    try testing.expectEqual(4, try ri.interface.readSliceShort(&buf));
    try testing.expectEqualStrings("Fren", buf[0..4]);
    try testing.expectEqual(0, try ri.interface.readSliceShort(&buf));
}

test readVec {
    var r: Reader = .fixed(std.ascii.letters);
    var flat_buffer: [52]u8 = undefined;
    var bufs: [2][]u8 = .{
        flat_buffer[0..26],
        flat_buffer[26..],
    };
    // `readVec` may return a short read in general; a fixed reader never
    // does, so the full count can be asserted here.
    try testing.expectEqual(26 * 2, try r.readVec(&bufs));
    try testing.expectEqualStrings(std.ascii.letters[0..26], bufs[0]);
    try testing.expectEqualStrings(std.ascii.letters[26..], bufs[1]);
}

test "expected error.EndOfStream" {
    // Unit test inspired by https://github.com/ziglang/zig/issues/17733
    var buffer: [3]u8 = undefined;
    var r: std.Io.Reader = .fixed(&buffer);
    r.end = 0; // capacity 3, but empty
    try std.testing.expectError(error.EndOfStream, r.takeEnum(enum(u8) { a, b }, .little));
    try std.testing.expectError(error.EndOfStream, r.take(3));
}

test "readVec at end" {
    var reader_buffer: [8]u8 = "abcd1234".*;
    var reader: testing.Reader = .init(&reader_buffer, &.{});
    reader.interface.end = reader_buffer.len;

    var out: [16]u8 = undefined;
    var vecs: [1][]u8 = .{&out};
    try testing.expectEqual(8, try reader.interface.readVec(&vecs));
    try testing.expectEqualStrings("abcd1234", vecs[0][0..8]);
}

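// Vtable stubs for constructing test readers that unconditionally report
// end-of-stream or read failure.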
fn endingStream(r: *Reader, w: *Writer, limit: Limit) StreamError!usize {
    _ = r;
    _ = w;
    _ = limit;
    return error.EndOfStream;
}

fn endingReadVec(r: *Reader, data: [][]u8) Error!usize {
    _ = r;
    _ = data;
    return error.EndOfStream;
}

fn endingDiscard(r: *Reader, limit: Limit) Error!usize {
    _ = r;
    _ = limit;
    return error.EndOfStream;
}

fn endingRebase(r: *Reader, capacity: usize) RebaseError!void {
    _ = r;
    _ = capacity;
    return error.EndOfStream;
}

fn failingStream(r: *Reader, w: *Writer, limit: Limit) StreamError!usize {
    _ = r;
    _ = w;
    _ = limit;
    return error.ReadFailed;
}

fn failingDiscard(r: *Reader, limit: Limit) Error!usize {
    _ = r;
    _ = limit;
    return error.ReadFailed;
}

test "discardAll that has to call discard multiple times on an indirect reader" {
    var fr: Reader = .fixed("ABCDEFGHIJKLMNOPQRSTUVWXYZ");
    var indirect_buffer: [3]u8 = undefined;
    var tri: std.testing.ReaderIndirect = .init(&fr, &indirect_buffer);
    const r = &tri.interface;

    try r.discardAll(10);
    var remaining_buf: [16]u8 = undefined;
    try r.readSliceAll(&remaining_buf);
    try std.testing.expectEqualStrings(fr.buffer[10..], remaining_buf[0..]);
}

test "readAlloc when the backing reader provides one byte at a time" {
    const str = "This is a test";
    var tiny_buffer: [1]u8 = undefined;
    var one_byte_stream: testing.Reader = .init(&tiny_buffer, &.{
        .{ .buffer = str },
    });
    one_byte_stream.artificial_limit = .limited(1);
    const res = try one_byte_stream.interface.allocRemaining(std.testing.allocator, .unlimited);
    defer std.testing.allocator.free(res);
    try std.testing.expectEqualStrings(str, res);
}

test "takeDelimiterInclusive when it rebases" {
    const written_line = "ABCDEFGHIJKLMNOPQRSTUVWXYZ\n";
    var buffer: [128]u8 = undefined;
    var tr: std.testing.Reader = .init(&buffer, &.{
        .{ .buffer = written_line },
        .{ .buffer = written_line },
        .{ .buffer = written_line },
        .{ .buffer = written_line },
        .{ .buffer = written_line },
        .{ .buffer = written_line },
    });
    const r = &tr.interface;
    for (0..6) |_| {
        try std.testing.expectEqualStrings(written_line, try r.takeDelimiterInclusive('\n'));
    }
}

test "takeDelimiterInclusive on an indirect reader when it rebases" {
    const written_line = "ABCDEFGHIJKLMNOPQRSTUVWXYZ\n";
    var buffer: [128]u8 = undefined;
    var tr: std.testing.Reader = .init(&buffer, &.{
        .{ .buffer = written_line[0..4] },
        .{ .buffer = written_line[4..] },
        .{ .buffer = written_line },
        .{ .buffer = written_line },
        .{ .buffer = written_line },
        .{ .buffer = written_line },
        .{ .buffer = written_line },
    });
    var indirect_buffer: [128]u8 = undefined;
    var tri: std.testing.ReaderIndirect = .init(&tr.interface, &indirect_buffer);
    const r = &tri.interface;
    for (0..6) |_| {
        try std.testing.expectEqualStrings(written_line, try r.takeDelimiterInclusive('\n'));
    }
}

test "takeStruct and peekStruct packed" {
    var r: Reader = .fixed(&.{ 0b11110000, 0b00110011 });
    const S = packed struct(u16) { a: u2, b: u6, c: u7, d: u1 };

    try testing.expectEqual(@as(S, .{
        .a = 0b11,
        .b = 0b001100,
        .c = 0b1110000,
        .d = 0b1,
    }), try r.peekStruct(S, .big));

    try testing.expectEqual(@as(S, .{
        .a = 0b11,
        .b = 0b001100,
        .c = 0b1110000,
        .d = 0b1,
    }), try r.takeStruct(S, .big));

    try testing.expectError(error.EndOfStream, r.takeStruct(S, .little));
}

/// Provides a `Reader` implementation by passing data from an underlying
/// reader through `Hasher.update`.
///
/// The underlying reader is best left unbuffered, since this adapter already
/// buffers; an additional layer of buffering only adds copies.
///
/// This implementation makes suboptimal buffering decisions because it is
/// generic over the hasher. A better solution would provide a dedicated
/// reader per hash function, with a discard buffer tailored to that hash's
/// implementation details.
pub fn Hashed(comptime Hasher: type) type {
    return struct {
        in: *Reader,
        hasher: Hasher,
        reader: Reader,

        pub fn init(in: *Reader, hasher: Hasher, buffer: []u8) @This() {
            return .{
                .in = in,
                .hasher = hasher,
                .reader = .{
                    .vtable = &.{
                        .stream = @This().stream,
                        .readVec = @This().readVec,
                        .discard = @This().discard,
                    },
                    .buffer = buffer,
                    .end = 0,
                    .seek = 0,
                },
            };
        }

        fn stream(r: *Reader, w: *Writer, limit: Limit) StreamError!usize {
            const this: *@This() = @alignCast(@fieldParentPtr("reader", r));
            const data = limit.slice(try w.writableSliceGreedy(1));
            var vec: [1][]u8 = .{data};
            const n = try this.in.readVec(&vec);
            this.hasher.update(data[0..n]);
            w.advance(n);
            return n;
        }

        fn readVec(r: *Reader, data: [][]u8) Error!usize {
            const this: *@This() = @alignCast(@fieldParentPtr("reader", r));
            var vecs: [8][]u8 = undefined; // Arbitrarily chosen amount.
            const dest_n, const data_size = try r.writableVector(&vecs, data);
            const dest = vecs[0..dest_n];
            const n = try this.in.readVec(dest);
            var remaining: usize = n;
            for (dest) |slice| {
                if (remaining < slice.len) {
                    this.hasher.update(slice[0..remaining]);
                    remaining = 0;
                    break;
                } else {
                    remaining -= slice.len;
                    this.hasher.update(slice);
                }
            }
            assert(remaining == 0);
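            // Bytes past the caller's slices were read into `r.buffer`;
            // account for them and report only the bytes delivered to `data`.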
            if (n > data_size) {
                r.end += n - data_size;
                return data_size;
            }
            return n;
        }

        fn discard(r: *Reader, limit: Limit) Error!usize {
            const this: *@This() = @alignCast(@fieldParentPtr("reader", r));
            const peeked = limit.slice(try this.in.peekGreedy(1));
            this.hasher.update(peeked);
            this.in.toss(peeked.len);
            return peeked.len;
        }
    };
}

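// A usage sketch, not from the upstream test suite: wrap a fixed reader in
// `Hashed` and check that every byte read was also hashed. Assumes the
// `std.hash.Crc32` API (`init`, `update`, `final`, `hash`).
test "Hashed reader usage sketch" {
    var in: Reader = .fixed("abcdef");
    var hashed_buffer: [8]u8 = undefined;
    var hashing: Hashed(std.hash.Crc32) = .init(&in, std.hash.Crc32.init(), &hashed_buffer);
    const r = &hashing.reader;
    try testing.expectEqualStrings("abc", try r.take(3));
    try testing.expectEqualStrings("def", try r.take(3));
    try testing.expectEqual(std.hash.Crc32.hash("abcdef"), hashing.hasher.final());
}

/// Fills `buffer` with destination vectors for a POSIX vectored read such as
/// `readv`: the caller's `data` slices first, followed by the reader's own
/// `buffer` so the read can fill it ahead of time. When buffered bytes remain
/// (`seek != end`), only the unused tail of `buffer` is offered, because
/// `data` must first be served from the already-buffered bytes.
///
/// Returns the number of vectors populated and the total byte capacity of the
/// `data` slices included. Bytes received beyond that capacity land in
/// `Reader.buffer`; the caller must advance `end` accordingly (see
/// `Hashed.readVec` above for an example of this accounting).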
pub fn writableVectorPosix(r: *Reader, buffer: []std.posix.iovec, data: []const []u8) Error!struct { usize, usize } {
    var i: usize = 0;
    var n: usize = 0;
    if (r.seek == r.end) {
        for (data) |buf| {
            if (buffer.len - i == 0) return .{ i, n };
            if (buf.len != 0) {
                buffer[i] = .{ .base = buf.ptr, .len = buf.len };
                i += 1;
                n += buf.len;
            }
        }
        const buf = r.buffer;
        if (buf.len != 0) {
            r.seek = 0;
            r.end = 0;
            buffer[i] = .{ .base = buf.ptr, .len = buf.len };
            i += 1;
        }
    } else {
        const buf = r.buffer[r.end..];
        buffer[i] = .{ .base = buf.ptr, .len = buf.len };
        i += 1;
    }
    return .{ i, n };
}

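/// Windows analog of `writableVectorPosix`, producing `WSABUF` entries for
/// `WSARecv` and friends. `WSABUF` lengths are `u32`, so a slice longer than
/// `maxInt(u32)` is truncated and terminates the vector list early.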
pub fn writableVectorWsa(
    r: *Reader,
    buffer: []std.os.windows.ws2_32.WSABUF,
    data: []const []u8,
) Error!struct { usize, usize } {
    var i: usize = 0;
    var n: usize = 0;
    if (r.seek == r.end) {
        for (data) |buf| {
            if (buffer.len - i == 0) return .{ i, n };
            if (buf.len == 0) continue;
            if (std.math.cast(u32, buf.len)) |len| {
                buffer[i] = .{ .buf = buf.ptr, .len = len };
                i += 1;
                n += len;
                continue;
            }
            buffer[i] = .{ .buf = buf.ptr, .len = std.math.maxInt(u32) };
            i += 1;
            n += std.math.maxInt(u32);
            return .{ i, n };
        }
        const buf = r.buffer;
        if (buf.len != 0) {
            r.seek = 0;
            r.end = 0;
            if (std.math.cast(u32, buf.len)) |len| {
                buffer[i] = .{ .buf = buf.ptr, .len = len };
            } else {
                buffer[i] = .{ .buf = buf.ptr, .len = std.math.maxInt(u32) };
            }
            i += 1;
        }
    } else {
        buffer[i] = .{
            .buf = r.buffer.ptr + r.end,
            .len = @min(std.math.maxInt(u32), r.buffer.len - r.end),
        };
        i += 1;
    }
    return .{ i, n };
}

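/// Plain-slice analog of `writableVectorPosix`, for `VTable.readVec`
/// implementations that forward to another `Reader`; see `Hashed.readVec`
/// above for example usage.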
pub fn writableVector(r: *Reader, buffer: [][]u8, data: []const []u8) Error!struct { usize, usize } {
    var i: usize = 0;
    var n: usize = 0;
    if (r.seek == r.end) {
        for (data) |buf| {
            if (buffer.len - i == 0) return .{ i, n };
            if (buf.len != 0) {
                buffer[i] = buf;
                i += 1;
                n += buf.len;
            }
        }
        if (r.buffer.len != 0) {
            r.seek = 0;
            r.end = 0;
            buffer[i] = r.buffer;
            i += 1;
        }
    } else {
        buffer[i] = r.buffer[r.end..];
        i += 1;
    }
    return .{ i, n };
}

test "deserialize signed LEB128" {
    // Truncated
    try testing.expectError(error.EndOfStream, testLeb128(i64, "\x80"));

    // Overflow
    try testing.expectError(error.Overflow, testLeb128(i8, "\x80\x80\x40"));
    try testing.expectError(error.Overflow, testLeb128(i16, "\x80\x80\x80\x40"));
    try testing.expectError(error.Overflow, testLeb128(i32, "\x80\x80\x80\x80\x40"));
    try testing.expectError(error.Overflow, testLeb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x40"));
    try testing.expectError(error.Overflow, testLeb128(i8, "\xff\x7e"));
    try testing.expectError(error.Overflow, testLeb128(i32, "\x80\x80\x80\x80\x08"));
    try testing.expectError(error.Overflow, testLeb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x01"));

    // Decode SLEB128
    try testing.expect((try testLeb128(i64, "\x00")) == 0);
    try testing.expect((try testLeb128(i64, "\x01")) == 1);
    try testing.expect((try testLeb128(i64, "\x3f")) == 63);
    try testing.expect((try testLeb128(i64, "\x40")) == -64);
    try testing.expect((try testLeb128(i64, "\x41")) == -63);
    try testing.expect((try testLeb128(i64, "\x7f")) == -1);
    try testing.expect((try testLeb128(i64, "\x80\x01")) == 128);
    try testing.expect((try testLeb128(i64, "\x81\x01")) == 129);
    try testing.expect((try testLeb128(i64, "\xff\x7e")) == -129);
    try testing.expect((try testLeb128(i64, "\x80\x7f")) == -128);
    try testing.expect((try testLeb128(i64, "\x81\x7f")) == -127);
    try testing.expect((try testLeb128(i64, "\xc0\x00")) == 64);
    try testing.expect((try testLeb128(i64, "\xc7\x9f\x7f")) == -12345);
    try testing.expect((try testLeb128(i8, "\xff\x7f")) == -1);
    try testing.expect((try testLeb128(i16, "\xff\xff\x7f")) == -1);
    try testing.expect((try testLeb128(i32, "\xff\xff\xff\xff\x7f")) == -1);
    try testing.expect((try testLeb128(i32, "\x80\x80\x80\x80\x78")) == -0x80000000);
    try testing.expect((try testLeb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7f")) == @as(i64, @bitCast(@as(u64, @intCast(0x8000000000000000)))));
    try testing.expect((try testLeb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x40")) == -0x4000000000000000);
    try testing.expect((try testLeb128(i64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x7f")) == -0x8000000000000000);

    // Decode unnormalized SLEB128 with extra padding bytes.
    try testing.expect((try testLeb128(i64, "\x80\x00")) == 0);
    try testing.expect((try testLeb128(i64, "\x80\x80\x00")) == 0);
    try testing.expect((try testLeb128(i64, "\xff\x00")) == 0x7f);
    try testing.expect((try testLeb128(i64, "\xff\x80\x00")) == 0x7f);
    try testing.expect((try testLeb128(i64, "\x80\x81\x00")) == 0x80);
    try testing.expect((try testLeb128(i64, "\x80\x81\x80\x00")) == 0x80);
}

test "deserialize unsigned LEB128" {
    // Truncated
    try testing.expectError(error.EndOfStream, testLeb128(u64, "\x80"));
    try testing.expectError(error.EndOfStream, testLeb128(u16, "\x80\x80\x84"));
    try testing.expectError(error.EndOfStream, testLeb128(u32, "\x80\x80\x80\x80\x90"));

    // Overflow
    try testing.expectError(error.Overflow, testLeb128(u8, "\x80\x02"));
    try testing.expectError(error.Overflow, testLeb128(u8, "\x80\x80\x40"));
    try testing.expectError(error.Overflow, testLeb128(u16, "\x80\x80\x80\x40"));
    try testing.expectError(error.Overflow, testLeb128(u32, "\x80\x80\x80\x80\x40"));
    try testing.expectError(error.Overflow, testLeb128(u64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x40"));

    // Decode ULEB128
    try testing.expect((try testLeb128(u64, "\x00")) == 0);
    try testing.expect((try testLeb128(u64, "\x01")) == 1);
    try testing.expect((try testLeb128(u64, "\x3f")) == 63);
    try testing.expect((try testLeb128(u64, "\x40")) == 64);
    try testing.expect((try testLeb128(u64, "\x7f")) == 0x7f);
    try testing.expect((try testLeb128(u64, "\x80\x01")) == 0x80);
    try testing.expect((try testLeb128(u64, "\x81\x01")) == 0x81);
    try testing.expect((try testLeb128(u64, "\x90\x01")) == 0x90);
    try testing.expect((try testLeb128(u64, "\xff\x01")) == 0xff);
    try testing.expect((try testLeb128(u64, "\x80\x02")) == 0x100);
    try testing.expect((try testLeb128(u64, "\x81\x02")) == 0x101);
    try testing.expect((try testLeb128(u64, "\x80\xc1\x80\x80\x10")) == 4294975616);
    try testing.expect((try testLeb128(u64, "\x80\x80\x80\x80\x80\x80\x80\x80\x80\x01")) == 0x8000000000000000);

    // Decode ULEB128 with extra padding bytes
    try testing.expect((try testLeb128(u64, "\x80\x00")) == 0);
    try testing.expect((try testLeb128(u64, "\x80\x80\x00")) == 0);
    try testing.expect((try testLeb128(u64, "\xff\x00")) == 0x7f);
    try testing.expect((try testLeb128(u64, "\xff\x80\x00")) == 0x7f);
    try testing.expect((try testLeb128(u64, "\x80\x81\x00")) == 0x80);
    try testing.expect((try testLeb128(u64, "\x80\x81\x80\x00")) == 0x80);
}

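/// Decodes `encoded` with `takeLeb128` and additionally verifies that the
/// reader consumed every input byte.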
fn testLeb128(comptime T: type, encoded: []const u8) !T {
    var reader: std.Io.Reader = .fixed(encoded);
    const result = try reader.takeLeb128(T);
    try testing.expect(reader.seek == reader.end);
    return result;
}

test {
    _ = Limited;
}