const Reader = @This();

const builtin = @import("builtin");
const native_endian = builtin.target.cpu.arch.endian();

const std = @import("../std.zig");
const Writer = std.io.Writer;
const assert = std.debug.assert;
const testing = std.testing;
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayListUnmanaged;
const Limit = std.io.Limit;

pub const Limited = @import("Reader/Limited.zig");

vtable: *const VTable,
buffer: []u8,
/// Number of bytes which have been consumed from `buffer`.
seek: usize,
/// In `buffer` before this are buffered bytes, after this is `undefined`.
end: usize,
pub const VTable = struct {
    /// Writes bytes from the internally tracked logical position to `w`.
    ///
    /// Returns the number of bytes written, which will be at minimum `0` and
    /// at most `limit`. The number returned, including zero, does not indicate
    /// end of stream.
    ///
    /// The reader's internal logical seek position moves forward in accordance
    /// with the number of bytes returned from this function.
    ///
    /// Implementations are encouraged to utilize mandatory minimum buffer
    /// sizes combined with short reads (returning a value less than `limit`)
    /// in order to minimize complexity.
    ///
    /// Although this function is usually called when `buffer` is empty, it is
    /// also called when the buffer needs to be filled more due to the API user
    /// requesting contiguous memory. In either case, the existing buffer data
    /// should be ignored; new data written to `w`.
    ///
    /// In addition to, or instead of, writing to `w`, the implementation may
    /// choose to store data in `buffer`, modifying `seek` and `end`
    /// accordingly. Implementations are encouraged to take advantage of
    /// this if it simplifies the logic.
    stream: *const fn (r: *Reader, w: *Writer, limit: Limit) StreamError!usize,

    /// Consumes bytes from the internally tracked stream position without
    /// providing access to them.
    ///
    /// Returns the number of bytes discarded, which will be at minimum `0` and
    /// at most `limit`. The number of bytes returned, including zero, does not
    /// indicate end of stream.
    ///
    /// The reader's internal logical seek position moves forward in accordance
    /// with the number of bytes returned from this function.
    ///
    /// Implementations are encouraged to utilize mandatory minimum buffer
    /// sizes combined with short reads (returning a value less than `limit`)
    /// in order to minimize complexity.
    ///
    /// The default implementation is based on calling `stream`, borrowing
    /// `buffer` to construct a temporary `Writer` and ignoring the written
    /// data.
    ///
    /// This function is only called when `buffer` is empty.
    discard: *const fn (r: *Reader, limit: Limit) Error!usize = defaultDiscard,

    /// Returns number of bytes written to `data`.
    ///
    /// `data` must have nonzero length. `data[0]` may have zero length, in
    /// which case the implementation must write to `Reader.buffer`.
    ///
    /// `data` may not contain an alias to `Reader.buffer`.
    ///
    /// `data` is mutable because the implementation may temporarily modify the
    /// fields in order to handle partial reads. Implementations must restore
    /// the original value before returning.
    ///
    /// Implementations may ignore `data`, writing directly to `Reader.buffer`,
    /// modifying `seek` and `end` accordingly, and returning 0 from this
    /// function. Implementations are encouraged to take advantage of this if
    /// it simplifies the logic.
    ///
    /// The default implementation calls `stream` with either `data[0]` or
    /// `Reader.buffer`, whichever is bigger.
    readVec: *const fn (r: *Reader, data: [][]u8) Error!usize = defaultReadVec,

    /// Ensures `capacity` more data can be buffered without rebasing.
    ///
    /// Asserts `capacity` is within buffer capacity, or that the stream ends
    /// within `capacity` bytes.
    ///
    /// Only called when `capacity` cannot fit into the unused capacity of
    /// `buffer`.
    ///
    /// The default implementation moves buffered data to the start of
    /// `buffer`, setting `seek` to zero, and cannot fail.
    rebase: *const fn (r: *Reader, capacity: usize) RebaseError!void = defaultRebase,
};
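// A minimal sketch of implementing `VTable` (illustrative only; `ZeroReader`
// and `streamZeros` are hypothetical names, not part of this API). It serves
// an endless stream of zero bytes by storing data directly in `buffer`, as
// the `stream` contract permits, and relies on the default `discard`,
// `readVec`, and `rebase` implementations.
const ZeroReader = struct {
    interface: Reader,

    fn init(buffer: []u8) ZeroReader {
        return .{ .interface = .{
            .vtable = &.{ .stream = streamZeros },
            .buffer = buffer,
            .seek = 0,
            .end = 0,
        } };
    }

    fn streamZeros(r: *Reader, w: *Writer, limit: Limit) StreamError!usize {
        _ = w;
        // Instead of writing to `w`, store data in `buffer` and report zero
        // bytes written; the buffered bytes are picked up by the caller.
        const dest = limit.slice(r.buffer[r.end..]);
        @memset(dest, 0);
        r.end += dest.len;
        return 0;
    }
};

test ZeroReader {
    var buffer: [8]u8 = undefined;
    var zr: ZeroReader = .init(&buffer);
    try testing.expectEqual(0, try zr.interface.takeByte());
}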
pub const StreamError = error{
    /// See the `Reader` implementation for detailed diagnostics.
    ReadFailed,
    /// See the `Writer` implementation for detailed diagnostics.
    WriteFailed,
    /// End of stream indicated from the `Reader`. This error cannot originate
    /// from the `Writer`.
    EndOfStream,
};

pub const Error = error{
    /// See the `Reader` implementation for detailed diagnostics.
    ReadFailed,
    EndOfStream,
};
pub const StreamRemainingError = error{
    /// See the `Reader` implementation for detailed diagnostics.
    ReadFailed,
    /// See the `Writer` implementation for detailed diagnostics.
    WriteFailed,
};

pub const ShortError = error{
    /// See the `Reader` implementation for detailed diagnostics.
    ReadFailed,
};
pub const RebaseError = error{
    EndOfStream,
};
pub const failing: Reader = .{
    .vtable = &.{
        .stream = failingStream,
        .discard = failingDiscard,
    },
    .buffer = &.{},
    .seek = 0,
    .end = 0,
};
/// This is generally safe to `@constCast` because it has an empty buffer, so
/// there is not really a way to accidentally attempt mutation of these fields.
const ending_state: Reader = .fixed(&.{});
pub const ending: *Reader = @constCast(&ending_state);

pub fn limited(r: *Reader, limit: Limit, buffer: []u8) Limited {
    return .init(r, limit, buffer);
}
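// Sketch of `limited` usage: cap how many bytes can be pulled through a
// reader. Assumes the `Limited` implementation in `Reader/Limited.zig`
// exposes its `Reader` via the `interface` field.
test limited {
    var r: Reader = .fixed("abcdef");
    var limit_buffer: [4]u8 = undefined;
    var l = r.limited(.limited(3), &limit_buffer);
    try testing.expectEqualStrings("abc", try l.interface.take(3));
    try testing.expectError(error.EndOfStream, l.interface.takeByte());
}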
/// Constructs a `Reader` such that it will read from `buffer` and then end.
pub fn fixed(buffer: []const u8) Reader {
    return .{
        .vtable = &.{
            .stream = endingStream,
            .discard = endingDiscard,
            .readVec = endingReadVec,
            .rebase = endingRebase,
        },
        // This cast is safe because all potential writes to it will instead
        // return `error.EndOfStream`.
        .buffer = @constCast(buffer),
        .end = buffer.len,
        .seek = 0,
    };
}
pub fn stream(r: *Reader, w: *Writer, limit: Limit) StreamError!usize {
    const buffer = limit.slice(r.buffer[r.seek..r.end]);
    if (buffer.len > 0) {
        @branchHint(.likely);
        const n = try w.write(buffer);
        r.seek += n;
        return n;
    }
    const n = try r.vtable.stream(r, w, limit);
    assert(n <= @intFromEnum(limit));
    return n;
}
pub fn discard(r: *Reader, limit: Limit) Error!usize {
    const buffered_len = r.end - r.seek;
    const remaining: Limit = if (limit.toInt()) |n| l: {
        if (buffered_len >= n) {
            r.seek += n;
            return n;
        }
        break :l .limited(n - buffered_len);
    } else .unlimited;
    r.seek = r.end;
    const n = try r.vtable.discard(r, remaining);
    assert(n <= @intFromEnum(remaining));
    return buffered_len + n;
}
pub fn defaultDiscard(r: *Reader, limit: Limit) Error!usize {
    assert(r.seek == r.end);
    r.seek = 0;
    r.end = 0;
    var d: Writer.Discarding = .init(r.buffer);
    const n = r.stream(&d.writer, limit) catch |err| switch (err) {
        error.WriteFailed => unreachable,
        error.ReadFailed => return error.ReadFailed,
        error.EndOfStream => return error.EndOfStream,
    };
    assert(n <= @intFromEnum(limit));
    return n;
}
/// "Pump" exactly `n` bytes from the reader to the writer.
pub fn streamExact(r: *Reader, w: *Writer, n: usize) StreamError!void {
    var remaining = n;
    while (remaining != 0) remaining -= try r.stream(w, .limited(remaining));
}
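// Example: pump an exact number of bytes into a `Writer`; requesting more
// than the stream holds fails with `error.EndOfStream` after a partial write.
test streamExact {
    var out_buffer: [16]u8 = undefined;
    var r: Reader = .fixed("abcdef");
    var w: Writer = .fixed(&out_buffer);
    try r.streamExact(&w, 4);
    try testing.expectEqualStrings("abcd", w.buffered());
    try testing.expectError(error.EndOfStream, r.streamExact(&w, 4));
}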
/// "Pump" exactly `n` bytes from the reader to the writer.
pub fn streamExact64(r: *Reader, w: *Writer, n: u64) StreamError!void {
    var remaining = n;
    while (remaining != 0) remaining -= try r.stream(w, .limited64(remaining));
}
/// "Pump" exactly `n` bytes from the reader to the writer.
///
/// When draining `w`, ensures that at least `preserve_len` bytes remain
/// buffered.
///
/// Asserts `Writer.buffer` capacity exceeds `preserve_len`.
pub fn streamExactPreserve(r: *Reader, w: *Writer, preserve_len: usize, n: usize) StreamError!void {
    if (w.end + n <= w.buffer.len) {
        @branchHint(.likely);
        return streamExact(r, w, n);
    }
    // If `n` is large, we can ignore `preserve_len` up to a point.
    var remaining = n;
    while (remaining > preserve_len) {
        assert(remaining != 0);
        remaining -= try r.stream(w, .limited(remaining - preserve_len));
        if (w.end + remaining <= w.buffer.len) return streamExact(r, w, remaining);
    }
    // All the next bytes received must be preserved.
    if (preserve_len < w.end) {
        @memmove(w.buffer[0..preserve_len], w.buffer[w.end - preserve_len ..][0..preserve_len]);
        w.end = preserve_len;
    }
    return streamExact(r, w, remaining);
}
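// Sketch of the preserve semantics: when the writer runs out of room, the
// last `preserve_len` bytes already written are kept buffered while the rest
// of the capacity is reused.
test streamExactPreserve {
    var out_buffer: [4]u8 = undefined;
    var r: Reader = .fixed("abcdef");
    var w: Writer = .fixed(&out_buffer);
    // Plenty of room: behaves exactly like `streamExact`.
    try r.streamExactPreserve(&w, 2, 4);
    try testing.expectEqualStrings("abcd", w.buffered());
    // Out of room: "cd" is preserved, then "ef" is appended after it.
    try r.streamExactPreserve(&w, 2, 2);
    try testing.expectEqualStrings("cdef", w.buffered());
}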
/// "Pump" data from the reader to the writer, handling `error.EndOfStream` as
/// a success case.
///
/// Returns total number of bytes written to `w`.
pub fn streamRemaining(r: *Reader, w: *Writer) StreamRemainingError!usize {
    var offset: usize = 0;
    while (true) {
        offset += r.stream(w, .unlimited) catch |err| switch (err) {
            error.EndOfStream => return offset,
            else => |e| return e,
        };
    }
}
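// Example: drain everything that remains into a `Writer`; end of stream is a
// success case and the total byte count is returned.
test streamRemaining {
    var out_buffer: [8]u8 = undefined;
    var r: Reader = .fixed("abcdef");
    var w: Writer = .fixed(&out_buffer);
    r.toss(1);
    try testing.expectEqual(5, try r.streamRemaining(&w));
    try testing.expectEqualStrings("bcdef", w.buffered());
}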
/// Consumes the stream until the end, ignoring all the data, returning the
/// number of bytes discarded.
pub fn discardRemaining(r: *Reader) ShortError!usize {
    var offset: usize = r.end - r.seek;
    r.seek = r.end;
    while (true) {
        offset += r.vtable.discard(r, .unlimited) catch |err| switch (err) {
            error.EndOfStream => return offset,
            else => |e| return e,
        };
    }
}
pub const LimitedAllocError = Allocator.Error || ShortError || error{StreamTooLong};
/// Transfers all bytes from the current position to the end of the stream, up
/// to `limit`, returning them as a caller-owned allocated slice.
///
/// If `limit` would be exceeded, `error.StreamTooLong` is returned instead. In
/// such case, the next byte that would be read will be the first one to exceed
/// `limit`, and all preceding bytes have been discarded.
///
/// Asserts `buffer` has nonzero capacity.
///
/// See also:
/// * `appendRemaining`
pub fn allocRemaining(r: *Reader, gpa: Allocator, limit: Limit) LimitedAllocError![]u8 {
    var buffer: ArrayList(u8) = .empty;
    defer buffer.deinit(gpa);
    try appendRemaining(r, gpa, null, &buffer, limit);
    return buffer.toOwnedSlice(gpa);
}
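// Example: collect the remainder of a stream into a caller-owned slice, and
// observe `error.StreamTooLong` when a limit would be exceeded.
test allocRemaining {
    var r: Reader = .fixed("abcdef");
    const contents = try r.allocRemaining(testing.allocator, .unlimited);
    defer testing.allocator.free(contents);
    try testing.expectEqualStrings("abcdef", contents);

    var r2: Reader = .fixed("abcdef");
    try testing.expectError(error.StreamTooLong, r2.allocRemaining(testing.allocator, .limited(3)));
}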
/// Transfers all bytes from the current position to the end of the stream, up
/// to `limit`, appending them to `list`.
///
/// If `limit` would be exceeded, `error.StreamTooLong` is returned instead. In
/// such case, the next byte that would be read will be the first one to exceed
/// `limit`, and all preceding bytes have been appended to `list`.
///
/// If `limit` is not `Limit.unlimited`, asserts `buffer` has nonzero capacity.
///
/// See also:
/// * `allocRemaining`
pub fn appendRemaining(
    r: *Reader,
    gpa: Allocator,
    comptime alignment: ?std.mem.Alignment,
    list: *std.ArrayListAlignedUnmanaged(u8, alignment),
    limit: Limit,
) LimitedAllocError!void {
    if (limit == .unlimited) return appendRemainingUnlimited(r, gpa, alignment, list, 1);
    assert(r.buffer.len != 0); // Needed to detect limit exceeded without losing data.
    const buffer_contents = r.buffer[r.seek..r.end];
    const copy_len = limit.minInt(buffer_contents.len);
    try list.appendSlice(gpa, buffer_contents[0..copy_len]);
    r.seek += copy_len;
    if (buffer_contents.len - copy_len != 0) return error.StreamTooLong;
    r.seek = 0;
    r.end = 0;
    var remaining = @intFromEnum(limit) - copy_len;
    // From here, we leave `buffer` empty, appending directly to `list`.
    var writer: Writer = .{
        .buffer = undefined,
        .end = undefined,
        .vtable = &.{ .drain = Writer.fixedDrain },
    };
    while (true) {
        try list.ensureUnusedCapacity(gpa, 2);
        const cap = list.unusedCapacitySlice();
        const dest = cap[0..@min(cap.len, remaining + 1)];
        writer.buffer = list.allocatedSlice();
        writer.end = list.items.len;
        const n = r.vtable.stream(r, &writer, .limited(dest.len)) catch |err| switch (err) {
            error.WriteFailed => unreachable, // Prevented by the limit.
            error.EndOfStream => return,
            error.ReadFailed => return error.ReadFailed,
        };
        list.items.len += n;
        if (n > remaining) {
            // Move the byte to `Reader.buffer` so it is not lost.
            assert(n - remaining == 1);
            assert(r.end == 0);
            r.buffer[0] = list.items[list.items.len - 1];
            list.items.len -= 1;
            r.end = 1;
            return;
        }
        remaining -= n;
    }
}
pub const UnlimitedAllocError = Allocator.Error || ShortError;
pub fn appendRemainingUnlimited(
    r: *Reader,
    gpa: Allocator,
    comptime alignment: ?std.mem.Alignment,
    list: *std.ArrayListAlignedUnmanaged(u8, alignment),
    bump: usize,
) UnlimitedAllocError!void {
    const buffer_contents = r.buffer[r.seek..r.end];
    try list.ensureUnusedCapacity(gpa, buffer_contents.len + bump);
    list.appendSliceAssumeCapacity(buffer_contents);
    // If statement protects `ending`.
    if (r.end != 0) {
        r.seek = 0;
        r.end = 0;
    }
    // From here, we leave `buffer` empty, appending directly to `list`.
    var writer: Writer = .{
        .buffer = undefined,
        .end = undefined,
        .vtable = &.{ .drain = Writer.fixedDrain },
    };
    while (true) {
        try list.ensureUnusedCapacity(gpa, bump);
        writer.buffer = list.allocatedSlice();
        writer.end = list.items.len;
        const n = r.vtable.stream(r, &writer, .limited(list.unusedCapacitySlice().len)) catch |err| switch (err) {
            error.WriteFailed => unreachable, // Prevented by the limit.
            error.EndOfStream => return,
            error.ReadFailed => return error.ReadFailed,
        };
        list.items.len += n;
    }
}
/// Writes bytes from the internally tracked stream position to `data`.
///
/// Returns the number of bytes written, which will be at minimum `0` and
/// at most the sum of each data slice length. The number of bytes read,
/// including zero, does not indicate end of stream.
///
/// The reader's internal logical seek position moves forward in accordance
/// with the number of bytes returned from this function.
pub fn readVec(r: *Reader, data: [][]u8) Error!usize {
    var seek = r.seek;
    for (data, 0..) |buf, i| {
        const contents = r.buffer[seek..r.end];
        const copy_len = @min(contents.len, buf.len);
        @memcpy(buf[0..copy_len], contents[0..copy_len]);
        seek += copy_len;
        if (buf.len - copy_len == 0) continue;
        // All of `buffer` has been copied to `data`.
        const n = seek - r.seek;
        r.seek = seek;
        data[i] = buf[copy_len..];
        defer data[i] = buf;
        return n + (r.vtable.readVec(r, data[i..]) catch |err| switch (err) {
            error.EndOfStream => if (n == 0) return error.EndOfStream else 0,
            error.ReadFailed => return error.ReadFailed,
        });
    }
    const n = seek - r.seek;
    r.seek = seek;
    return n;
}
/// Writes to `Reader.buffer` or `data`, whichever has larger capacity.
pub fn defaultReadVec(r: *Reader, data: [][]u8) Error!usize {
    const first = data[0];
    if (first.len >= r.buffer.len - r.end) {
        var writer: Writer = .{
            .buffer = first,
            .end = 0,
            .vtable = &.{ .drain = Writer.fixedDrain },
        };
        const limit: Limit = .limited(writer.buffer.len - writer.end);
        return r.vtable.stream(r, &writer, limit) catch |err| switch (err) {
            error.WriteFailed => unreachable,
            else => |e| return e,
        };
    }
    var writer: Writer = .{
        .buffer = r.buffer,
        .end = r.end,
        .vtable = &.{ .drain = Writer.fixedDrain },
    };
    const limit: Limit = .limited(writer.buffer.len - writer.end);
    r.end += r.vtable.stream(r, &writer, limit) catch |err| switch (err) {
        error.WriteFailed => unreachable,
        else => |e| return e,
    };
    return 0;
}
pub fn buffered(r: *Reader) []u8 {
    return r.buffer[r.seek..r.end];
}
pub fn bufferedLen(r: *const Reader) usize {
    return r.end - r.seek;
}
pub fn hashed(r: *Reader, hasher: anytype, buffer: []u8) Hashed(@TypeOf(hasher)) {
    return .init(r, hasher, buffer);
}
pub fn readVecAll(r: *Reader, data: [][]u8) Error!void {
    var index: usize = 0;
    var truncate: usize = 0;
    while (index < data.len) {
        {
            const untruncated = data[index];
            data[index] = untruncated[truncate..];
            defer data[index] = untruncated;
            truncate += try r.readVec(data[index..]);
        }
        while (index < data.len and truncate >= data[index].len) {
            truncate -= data[index].len;
            index += 1;
        }
    }
}
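// Example: fill multiple output buffers completely in one call.
test readVecAll {
    var r: Reader = .fixed("abcdef");
    var first: [2]u8 = undefined;
    var second: [4]u8 = undefined;
    var data: [2][]u8 = .{ &first, &second };
    try r.readVecAll(&data);
    try testing.expectEqualStrings("ab", &first);
    try testing.expectEqualStrings("cdef", &second);
}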
/// Returns the next `n` bytes from the stream, filling the buffer as
/// necessary.
///
/// Invalidates previously returned values from `peek`.
///
/// Asserts that the `Reader` was initialized with a buffer capacity at
/// least as big as `n`.
///
/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream`
/// is returned instead.
///
/// See also:
/// * `take`
/// * `toss`
pub fn peek(r: *Reader, n: usize) Error![]u8 {
    try r.fill(n);
    return r.buffer[r.seek..][0..n];
}
/// Returns all the next buffered bytes, after filling the buffer to ensure it
/// contains at least `n` bytes.
///
/// Invalidates previously returned values from `peek` and `peekGreedy`.
///
/// Asserts that the `Reader` was initialized with a buffer capacity at
/// least as big as `n`.
///
/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream`
/// is returned instead.
///
/// See also:
/// * `peek`
/// * `toss`
pub fn peekGreedy(r: *Reader, n: usize) Error![]u8 {
    try r.fill(n);
    return r.buffer[r.seek..r.end];
}
/// Skips the next `n` bytes from the stream, advancing the seek position. This
/// is typically and safely used after `peek`.
///
/// Asserts that the number of bytes buffered is at least as many as `n`.
///
/// The "tossed" memory remains alive until a "peek" operation occurs.
///
/// See also:
/// * `peek`
/// * `discard`
pub fn toss(r: *Reader, n: usize) void {
    r.seek += n;
    assert(r.seek <= r.end);
}
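// Sketch of the peek-then-toss pattern: inspect buffered bytes without
// committing, then consume only what was recognized.
test "peek then toss" {
    var r: Reader = .fixed("key=value");
    const head = try r.peek(4);
    if (std.mem.indexOfScalar(u8, head, '=')) |i| r.toss(i + 1); // consume "key="
    try testing.expectEqualStrings("value", try r.take(5));
}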
/// Equivalent to `toss(r.bufferedLen())`.
pub fn tossBuffered(r: *Reader) void {
    r.seek = r.end;
}
/// Equivalent to `peek` followed by `toss`.
///
/// The data returned is invalidated by the next call to `take`, `peek`,
/// `fill`, and functions with those prefixes.
pub fn take(r: *Reader, n: usize) Error![]u8 {
    const result = try r.peek(n);
    r.toss(n);
    return result;
}
/// Returns the next `n` bytes from the stream as an array, filling the buffer
/// as necessary and advancing the seek position `n` bytes.
///
/// Asserts that the `Reader` was initialized with a buffer capacity at
/// least as big as `n`.
///
/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream`
/// is returned instead.
///
/// See also:
/// * `take`
pub fn takeArray(r: *Reader, comptime n: usize) Error!*[n]u8 {
    return (try r.take(n))[0..n];
}
/// Returns the next `n` bytes from the stream as an array, filling the buffer
/// as necessary, without advancing the seek position.
///
/// Asserts that the `Reader` was initialized with a buffer capacity at
/// least as big as `n`.
///
/// If there are fewer than `n` bytes left in the stream, `error.EndOfStream`
/// is returned instead.
///
/// See also:
/// * `peek`
/// * `takeArray`
pub fn peekArray(r: *Reader, comptime n: usize) Error!*[n]u8 {
    return (try r.peek(n))[0..n];
}
/// Skips the next `n` bytes from the stream, advancing the seek position.
///
/// Unlike `toss` which is infallible, in this function `n` can be any amount.
///
/// Returns `error.EndOfStream` if fewer than `n` bytes could be discarded.
///
/// See also:
/// * `toss`
/// * `discardRemaining`
/// * `discardShort`
/// * `discard`
pub fn discardAll(r: *Reader, n: usize) Error!void {
    if ((try r.discardShort(n)) != n) return error.EndOfStream;
}
pub fn discardAll64(r: *Reader, n: u64) Error!void {
    var remaining: u64 = n;
    while (remaining > 0) {
        const limited_remaining = std.math.cast(usize, remaining) orelse std.math.maxInt(usize);
        try discardAll(r, limited_remaining);
        remaining -= limited_remaining;
    }
}
/// Skips the next `n` bytes from the stream, advancing the seek position.
///
/// Unlike `toss` which is infallible, in this function `n` can be any amount.
///
/// Returns the number of bytes discarded, which is less than `n` if and only
/// if the stream reached the end.
///
/// See also:
/// * `discardAll`
/// * `discardRemaining`
/// * `discard`
pub fn discardShort(r: *Reader, n: usize) ShortError!usize {
    const proposed_seek = r.seek + n;
    if (proposed_seek <= r.end) {
        @branchHint(.likely);
        r.seek = proposed_seek;
        return n;
    }
    var remaining = n - (r.end - r.seek);
    r.seek = r.end;
    while (true) {
        const discard_len = r.vtable.discard(r, .limited(remaining)) catch |err| switch (err) {
            error.EndOfStream => return n - remaining,
            error.ReadFailed => return error.ReadFailed,
        };
        remaining -= discard_len;
        if (remaining == 0) return n;
    }
}
/// Fill `buffer` with the next `buffer.len` bytes from the stream, advancing
/// the seek position.
///
/// Invalidates previously returned values from `peek`.
///
/// If the provided buffer cannot be filled completely, `error.EndOfStream` is
/// returned instead.
///
/// See also:
/// * `peek`
/// * `readSliceShort`
pub fn readSliceAll(r: *Reader, buffer: []u8) Error!void {
    const n = try readSliceShort(r, buffer);
    if (n != buffer.len) return error.EndOfStream;
}
/// Fill `buffer` with the next `buffer.len` bytes from the stream, advancing
/// the seek position.
///
/// Invalidates previously returned values from `peek`.
///
/// Returns the number of bytes read, which is less than `buffer.len` if and
/// only if the stream reached the end.
///
/// See also:
/// * `readSliceAll`
pub fn readSliceShort(r: *Reader, buffer: []u8) ShortError!usize {
    const contents = r.buffer[r.seek..r.end];
    const copy_len = @min(buffer.len, contents.len);
    @memcpy(buffer[0..copy_len], contents[0..copy_len]);
    r.seek += copy_len;
    if (buffer.len - copy_len == 0) {
        @branchHint(.likely);
        return buffer.len;
    }
    var i: usize = copy_len;
    var data: [1][]u8 = undefined;
    while (true) {
        data[0] = buffer[i..];
        i += readVec(r, &data) catch |err| switch (err) {
            error.EndOfStream => return i,
            error.ReadFailed => return error.ReadFailed,
        };
        if (buffer.len - i == 0) return buffer.len;
    }
}
/// Fill `buffer` with the next `buffer.len` bytes from the stream, advancing
/// the seek position.
///
/// Invalidates previously returned values from `peek`.
///
/// If the provided buffer cannot be filled completely, `error.EndOfStream` is
/// returned instead.
///
/// The function is inline to avoid the dead code in case `endian` is
/// comptime-known and matches host endianness.
///
/// See also:
/// * `readSliceAll`
/// * `readSliceEndianAlloc`
pub inline fn readSliceEndian(
    r: *Reader,
    comptime Elem: type,
    buffer: []Elem,
    endian: std.builtin.Endian,
) Error!void {
    try readSliceAll(r, @ptrCast(buffer));
    if (native_endian != endian) for (buffer) |*elem| std.mem.byteSwapAllFields(Elem, elem);
}
pub const ReadAllocError = Error || Allocator.Error;
/// The function is inline to avoid the dead code in case `endian` is
/// comptime-known and matches host endianness.
pub inline fn readSliceEndianAlloc(
    r: *Reader,
    allocator: Allocator,
    comptime Elem: type,
    len: usize,
    endian: std.builtin.Endian,
) ReadAllocError![]Elem {
    const dest = try allocator.alloc(Elem, len);
    errdefer allocator.free(dest);
    try readSliceAll(r, @ptrCast(dest));
    if (native_endian != endian) for (dest) |*elem| std.mem.byteSwapAllFields(Elem, elem);
    return dest;
}
/// Shortcut for calling `readSliceAll` with a buffer provided by `allocator`.
pub fn readAlloc(r: *Reader, allocator: Allocator, len: usize) ReadAllocError![]u8 {
    const dest = try allocator.alloc(u8, len);
    errdefer allocator.free(dest);
    try readSliceAll(r, dest);
    return dest;
}
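// Example: read an exact number of bytes into a freshly allocated buffer; a
// short stream results in `error.EndOfStream` and no leaked allocation.
test readAlloc {
    var r: Reader = .fixed("abcdef");
    const prefix = try r.readAlloc(testing.allocator, 4);
    defer testing.allocator.free(prefix);
    try testing.expectEqualStrings("abcd", prefix);
    try testing.expectError(error.EndOfStream, r.readAlloc(testing.allocator, 4));
}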
pub const DelimiterError = error{
    /// See the `Reader` implementation for detailed diagnostics.
    ReadFailed,
    /// For "inclusive" functions, stream ended before the delimiter was found.
    /// For "exclusive" functions, stream ended and there are no more bytes to
    /// return.
    EndOfStream,
    /// The delimiter was not found within a number of bytes matching the
    /// capacity of the `Reader`.
    StreamTooLong,
};
/// Returns a slice of the next bytes of buffered data from the stream until
/// `sentinel` is found, advancing the seek position.
///
/// Returned slice has a sentinel.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `peekSentinel`
/// * `takeDelimiterExclusive`
/// * `takeDelimiterInclusive`
pub fn takeSentinel(r: *Reader, comptime sentinel: u8) DelimiterError![:sentinel]u8 {
    const result = try r.peekSentinel(sentinel);
    r.toss(result.len + 1);
    return result;
}
/// Returns a slice of the next bytes of buffered data from the stream until
/// `sentinel` is found, without advancing the seek position.
///
/// Returned slice has a sentinel; end of stream does not count as a delimiter.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `takeSentinel`
/// * `peekDelimiterExclusive`
/// * `peekDelimiterInclusive`
pub fn peekSentinel(r: *Reader, comptime sentinel: u8) DelimiterError![:sentinel]u8 {
    const result = try r.peekDelimiterInclusive(sentinel);
    return result[0 .. result.len - 1 :sentinel];
}
/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, advancing the seek position.
///
/// Returned slice includes the delimiter as the last byte.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `takeSentinel`
/// * `takeDelimiterExclusive`
/// * `peekDelimiterInclusive`
pub fn takeDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
    const result = try r.peekDelimiterInclusive(delimiter);
    r.toss(result.len);
    return result;
}
/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, without advancing the seek position.
///
/// Returned slice includes the delimiter as the last byte.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `peekSentinel`
/// * `peekDelimiterExclusive`
/// * `takeDelimiterInclusive`
pub fn peekDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
    const buffer = r.buffer[0..r.end];
    const seek = r.seek;
    if (std.mem.indexOfScalarPos(u8, buffer, seek, delimiter)) |end| {
        @branchHint(.likely);
        return buffer[seek .. end + 1];
    }
    // TODO take a parameter for max search length rather than relying on buffer capacity
    try rebase(r, r.buffer.len);
    while (r.buffer.len - r.end != 0) {
        const end_cap = r.buffer[r.end..];
        var writer: Writer = .fixed(end_cap);
        const n = r.vtable.stream(r, &writer, .limited(end_cap.len)) catch |err| switch (err) {
            error.WriteFailed => unreachable,
            else => |e| return e,
        };
        r.end += n;
        if (std.mem.indexOfScalarPos(u8, end_cap[0..n], 0, delimiter)) |end| {
            return r.buffer[0 .. r.end - n + end + 1];
        }
    }
    return error.StreamTooLong;
}
/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, advancing the seek position.
///
/// Returned slice excludes the delimiter. End-of-stream is treated equivalent
/// to a delimiter, unless it would result in a length 0 return value, in which
/// case `error.EndOfStream` is returned instead.
///
/// If the delimiter is not found within a number of bytes matching the
/// capacity of this `Reader`, `error.StreamTooLong` is returned. In
/// such case, the stream state is unmodified as if this function was never
/// called.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `takeDelimiterInclusive`
/// * `peekDelimiterExclusive`
pub fn takeDelimiterExclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
    const result = r.peekDelimiterInclusive(delimiter) catch |err| switch (err) {
        error.EndOfStream => {
            const remaining = r.buffer[r.seek..r.end];
            if (remaining.len == 0) return error.EndOfStream;
            r.toss(remaining.len);
            return remaining;
        },
        else => |e| return e,
    };
    r.toss(result.len);
    return result[0 .. result.len - 1];
}
/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, without advancing the seek position.
///
/// Returned slice excludes the delimiter. End-of-stream is treated equivalent
/// to a delimiter, unless it would result in a length 0 return value, in which
/// case `error.EndOfStream` is returned instead.
///
/// If the delimiter is not found within a number of bytes matching the
/// capacity of this `Reader`, `error.StreamTooLong` is returned. In
/// such case, the stream state is unmodified as if this function was never
/// called.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `peekDelimiterInclusive`
/// * `takeDelimiterExclusive`
pub fn peekDelimiterExclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
    const result = r.peekDelimiterInclusive(delimiter) catch |err| switch (err) {
        error.EndOfStream => {
            const remaining = r.buffer[r.seek..r.end];
            if (remaining.len == 0) return error.EndOfStream;
            return remaining;
        },
        else => |e| return e,
    };
    return result[0 .. result.len - 1];
}
/// Appends to `w` contents by reading from the stream until `delimiter` is
/// found. Does not write the delimiter itself.
///
/// Returns number of bytes streamed, which may be zero, or
/// `error.EndOfStream` if the delimiter was not found.
///
/// Asserts buffer capacity of at least one. This function performs better with
/// larger buffers.
///
/// See also:
/// * `streamDelimiterEnding`
/// * `streamDelimiterLimit`
pub fn streamDelimiter(r: *Reader, w: *Writer, delimiter: u8) StreamError!usize {
    const n = streamDelimiterLimit(r, w, delimiter, .unlimited) catch |err| switch (err) {
        error.StreamTooLong => unreachable, // unlimited is passed
        else => |e| return e,
    };
    if (r.seek == r.end) return error.EndOfStream;
    return n;
}
/// Appends to `w` contents by reading from the stream until `delimiter` is found.
/// Does not write the delimiter itself.
///
/// Returns number of bytes streamed, which may be zero. If the stream reaches
/// the end, the reader buffer will be empty when this function returns.
/// Otherwise, it will have at least one byte buffered, starting with the
/// delimiter.
///
/// Asserts buffer capacity of at least one. This function performs better with
/// larger buffers.
///
/// See also:
/// * `streamDelimiter`
/// * `streamDelimiterLimit`
pub fn streamDelimiterEnding(
    r: *Reader,
    w: *Writer,
    delimiter: u8,
) StreamRemainingError!usize {
    return streamDelimiterLimit(r, w, delimiter, .unlimited) catch |err| switch (err) {
        error.StreamTooLong => unreachable, // unlimited is passed
        else => |e| return e,
    };
}
pub const StreamDelimiterLimitError = error{
    ReadFailed,
    WriteFailed,
    /// The delimiter was not found within the limit.
    StreamTooLong,
};
/// Appends to `w` contents by reading from the stream until `delimiter` is found.
/// Does not write the delimiter itself.
///
/// Returns number of bytes streamed, which may be zero. End of stream can be
/// detected by checking if the next byte in the stream is the delimiter.
///
/// Asserts buffer capacity of at least one. This function performs better with
/// larger buffers.
pub fn streamDelimiterLimit(
    r: *Reader,
    w: *Writer,
    delimiter: u8,
    limit: Limit,
) StreamDelimiterLimitError!usize {
    var remaining = @intFromEnum(limit);
    while (remaining != 0) {
        const available = Limit.limited(remaining).slice(r.peekGreedy(1) catch |err| switch (err) {
            error.ReadFailed => return error.ReadFailed,
            error.EndOfStream => return @intFromEnum(limit) - remaining,
        });
        if (std.mem.indexOfScalar(u8, available, delimiter)) |delimiter_index| {
            try w.writeAll(available[0..delimiter_index]);
            r.toss(delimiter_index);
            remaining -= delimiter_index;
            return @intFromEnum(limit) - remaining;
        }
        try w.writeAll(available);
        r.toss(available.len);
        remaining -= available.len;
    }
    return error.StreamTooLong;
}
/// Reads from the stream until specified byte is found, discarding all data,
/// including the delimiter.
///
/// Returns number of bytes discarded, or `error.EndOfStream` if the delimiter
/// is not found.
///
/// See also:
/// * `discardDelimiterExclusive`
/// * `discardDelimiterLimit`
pub fn discardDelimiterInclusive(r: *Reader, delimiter: u8) Error!usize {
    const n = discardDelimiterLimit(r, delimiter, .unlimited) catch |err| switch (err) {
        error.StreamTooLong => unreachable, // unlimited is passed
        else => |e| return e,
    };
    if (r.seek == r.end) return error.EndOfStream;
    assert(r.buffer[r.seek] == delimiter);
    toss(r, 1);
    return n + 1;
}
/// Reads from the stream until specified byte is found, discarding all data,
/// excluding the delimiter.
///
/// Returns the number of bytes discarded.
///
/// Succeeds if stream ends before delimiter found. End of stream can be
/// detected by checking if the delimiter is buffered.
///
/// See also:
/// * `discardDelimiterInclusive`
/// * `discardDelimiterLimit`
pub fn discardDelimiterExclusive(r: *Reader, delimiter: u8) ShortError!usize {
    return discardDelimiterLimit(r, delimiter, .unlimited) catch |err| switch (err) {
        error.StreamTooLong => unreachable, // unlimited is passed
        else => |e| return e,
    };
}
pub const DiscardDelimiterLimitError = error{
    ReadFailed,
    /// The delimiter was not found within the limit.
    StreamTooLong,
};
/// Reads from the stream until specified byte is found, discarding all data,
/// excluding the delimiter.
///
/// Returns the number of bytes discarded.
///
/// Succeeds if stream ends before delimiter found. End of stream can be
/// detected by checking if the delimiter is buffered.
pub fn discardDelimiterLimit(r: *Reader, delimiter: u8, limit: Limit) DiscardDelimiterLimitError!usize {
    var remaining = @intFromEnum(limit);
    while (remaining != 0) {
        const available = Limit.limited(remaining).slice(r.peekGreedy(1) catch |err| switch (err) {
            error.ReadFailed => return error.ReadFailed,
            error.EndOfStream => return @intFromEnum(limit) - remaining,
        });
        if (std.mem.indexOfScalar(u8, available, delimiter)) |delimiter_index| {
            r.toss(delimiter_index);
            remaining -= delimiter_index;
            return @intFromEnum(limit) - remaining;
        }
        r.toss(available.len);
        remaining -= available.len;
    }
    return error.StreamTooLong;
}
/// Fills the buffer such that it contains at least `n` bytes, without
/// advancing the seek position.
///
/// Returns `error.EndOfStream` if and only if there are fewer than `n` bytes
/// remaining.
///
/// If the end of stream is not encountered, asserts buffer capacity is at
/// least `n`.
pub fn fill(r: *Reader, n: usize) Error!void {
    if (r.seek + n <= r.end) {
        @branchHint(.likely);
        return;
    }
    return fillUnbuffered(r, n);
}
/// This internal function is separated from `fill` to encourage optimizers to inline `fill`, hence
/// propagating its `@branchHint` to usage sites. If these functions are combined, `fill` is large
/// enough that LLVM is reluctant to inline it, forcing usages of APIs like `takeInt` to go through
/// an expensive runtime function call just to figure out that the data is, in fact, already in the
/// buffer.
///
/// Missing this optimization can result in wall-clock time for the most affected benchmarks
/// increasing by a factor of 5 or more.
fn fillUnbuffered(r: *Reader, n: usize) Error!void {
    try rebase(r, n);
    var bufs: [1][]u8 = .{""};
    while (r.end < r.seek + n) _ = try r.vtable.readVec(r, &bufs);
}
/// Without advancing the seek position, does exactly one underlying read, filling the buffer as
/// much as possible. This may result in zero bytes added to the buffer, which is not an end of
/// stream condition. End of stream is communicated via returning `error.EndOfStream`.
///
/// Asserts buffer capacity is at least 1.
pub fn fillMore(r: *Reader) Error!void {
    try rebase(r, 1);
    var bufs: [1][]u8 = .{""};
    _ = try r.vtable.readVec(r, &bufs);
}
/// Returns the next byte from the stream or returns `error.EndOfStream`.
///
/// Does not advance the seek position.
///
/// Asserts the buffer capacity is nonzero.
pub fn peekByte(r: *Reader) Error!u8 {
    const buffer = r.buffer[0..r.end];
    const seek = r.seek;
    if (seek < buffer.len) {
        @branchHint(.likely);
        return buffer[seek];
    }
    try fill(r, 1);
    return r.buffer[r.seek];
}
writableVector() |
/// Reads 1 byte from the stream or returns `error.EndOfStream`. /// /// Asserts the buffer capacity is nonzero. pub fn takeByte(r: *Reader) Error!u8 { const result = try peekByte(r); r.seek += 1; return result; } /// Same as `takeByte` except the returned byte is signed. pub fn takeByteSigned(r: *Reader) Error!i8 { return @bitCast(try r.takeByte()); } /// Asserts the buffer was initialized with a capacity at least `@bitSizeOf(T) / 8`. pub inline fn takeInt(r: *Reader, comptime T: type, endian: std.builtin.Endian) Error!T { const n = @divExact(@typeInfo(T).int.bits, 8); return std.mem.readInt(T, try r.takeArray(n), endian); } /// Asserts the buffer was initialized with a capacity at least `@bitSizeOf(T) / 8`. pub inline fn peekInt(r: *Reader, comptime T: type, endian: std.builtin.Endian) Error!T { const n = @divExact(@typeInfo(T).int.bits, 8); return std.mem.readInt(T, try r.peekArray(n), endian); } /// Asserts the buffer was initialized with a capacity at least `n`. pub fn takeVarInt(r: *Reader, comptime Int: type, endian: std.builtin.Endian, n: usize) Error!Int { assert(n <= @sizeOf(Int)); return std.mem.readVarInt(Int, try r.take(n), endian); } /// Obtains an unaligned pointer to the beginning of the stream, reinterpreted /// as a pointer to the provided type, advancing the seek position. /// /// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`. /// /// See also: /// * `peekStructPointer` /// * `takeStruct` pub fn takeStructPointer(r: *Reader, comptime T: type) Error!*align(1) T { // Only extern and packed structs have defined in-memory layout. comptime assert(@typeInfo(T).@"struct".layout != .auto); return @ptrCast(try r.takeArray(@sizeOf(T))); } /// Obtains an unaligned pointer to the beginning of the stream, reinterpreted /// as a pointer to the provided type, without advancing the seek position. /// /// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`. /// /// See also: /// * `takeStructPointer` /// * `peekStruct` pub fn peekStructPointer(r: *Reader, comptime T: type) Error!*align(1) T { // Only extern and packed structs have defined in-memory layout. comptime assert(@typeInfo(T).@"struct".layout != .auto); return @ptrCast(try r.peekArray(@sizeOf(T))); } /// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`. /// /// This function is inline to avoid referencing `std.mem.byteSwapAllFields` /// when `endian` is comptime-known and matches the host endianness. /// /// See also: /// * `takeStructPointer` /// * `peekStruct` pub inline fn takeStruct(r: *Reader, comptime T: type, endian: std.builtin.Endian) Error!T { switch (@typeInfo(T)) { .@"struct" => |info| switch (info.layout) { .auto => @compileError("ill-defined memory layout"), .@"extern" => { var res = (try r.takeStructPointer(T)).*; if (native_endian != endian) std.mem.byteSwapAllFields(T, &res); return res; }, .@"packed" => { return @bitCast(try takeInt(r, info.backing_integer.?, endian)); }, }, else => @compileError("not a struct"), } } /// Asserts the buffer was initialized with a capacity at least `@sizeOf(T)`. /// /// This function is inline to avoid referencing `std.mem.byteSwapAllFields` /// when `endian` is comptime-known and matches the host endianness. 
/// /// See also: /// * `takeStruct` /// * `peekStructPointer` pub inline fn peekStruct(r: *Reader, comptime T: type, endian: std.builtin.Endian) Error!T { switch (@typeInfo(T)) { .@"struct" => |info| switch (info.layout) { .auto => @compileError("ill-defined memory layout"), .@"extern" => { var res = (try r.peekStructPointer(T)).*; if (native_endian != endian) std.mem.byteSwapAllFields(T, &res); return res; }, .@"packed" => { return @bitCast(try peekInt(r, info.backing_integer.?, endian)); }, }, else => @compileError("not a struct"), } } pub const TakeEnumError = Error || error{InvalidEnumTag}; /// Reads an integer with the same size as the given enum's tag type. If the /// integer matches an enum tag, casts the integer to the enum tag and returns /// it. Otherwise, returns `error.InvalidEnumTag`. /// /// Asserts the buffer was initialized with a capacity at least `@sizeOf(Enum)`. pub fn takeEnum(r: *Reader, comptime Enum: type, endian: std.builtin.Endian) TakeEnumError!Enum { const Tag = @typeInfo(Enum).@"enum".tag_type; const int = try r.takeInt(Tag, endian); return std.meta.intToEnum(Enum, int); } /// Reads an integer with the same size as the given nonexhaustive enum's tag type. /// /// Asserts the buffer was initialized with a capacity at least `@sizeOf(Enum)`. pub fn takeEnumNonexhaustive(r: *Reader, comptime Enum: type, endian: std.builtin.Endian) Error!Enum { const info = @typeInfo(Enum).@"enum"; comptime assert(!info.is_exhaustive); comptime assert(@bitSizeOf(info.tag_type) == @sizeOf(info.tag_type) * 8); return takeEnum(r, Enum, endian) catch |err| switch (err) { error.InvalidEnumTag => unreachable, else => |e| return e, }; } pub const TakeLeb128Error = Error || error{Overflow}; /// Read a single LEB128 value as type T, or `error.Overflow` if the value cannot fit. pub fn takeLeb128(r: *Reader, comptime Result: type) TakeLeb128Error!Result { const result_info = @typeInfo(Result).int; return std.math.cast(Result, try r.takeMultipleOf7Leb128(@Type(.{ .int = .{ .signedness = result_info.signedness, .bits = std.mem.alignForwardAnyAlign(u16, result_info.bits, 7), } }))) orelse error.Overflow; } pub fn expandTotalCapacity(r: *Reader, allocator: Allocator, n: usize) Allocator.Error!void { if (n <= r.buffer.len) return; if (r.seek > 0) rebase(r, r.buffer.len); var list: ArrayList(u8) = .{ .items = r.buffer[0..r.end], .capacity = r.buffer.len, }; defer r.buffer = list.allocatedSlice(); try list.ensureTotalCapacity(allocator, n); } pub const FillAllocError = Error || Allocator.Error; pub fn fillAlloc(r: *Reader, allocator: Allocator, n: usize) FillAllocError!void { try expandTotalCapacity(r, allocator, n); return fill(r, n); } fn takeMultipleOf7Leb128(r: *Reader, comptime Result: type) TakeLeb128Error!Result { const result_info = @typeInfo(Result).int; comptime assert(result_info.bits % 7 == 0); var remaining_bits: std.math.Log2IntCeil(Result) = result_info.bits; const UnsignedResult = @Type(.{ .int = .{ .signedness = .unsigned, .bits = result_info.bits, } }); var result: UnsignedResult = 0; var fits = true; while (true) { const buffer: []const packed struct(u8) { bits: u7, more: bool } = @ptrCast(try r.peekGreedy(1)); for (buffer, 1..) 
|byte, len| { if (remaining_bits > 0) { result = @shlExact(@as(UnsignedResult, byte.bits), result_info.bits - 7) | if (result_info.bits > 7) @shrExact(result, 7) else 0; remaining_bits -= 7; } else if (fits) fits = switch (result_info.signedness) { .signed => @as(i7, @bitCast(byte.bits)) == @as(i7, @truncate(@as(Result, @bitCast(result)) >> (result_info.bits - 1))), .unsigned => byte.bits == 0, }; if (byte.more) continue; r.toss(len); return if (fits) @as(Result, @bitCast(result)) >> remaining_bits else error.Overflow; } r.toss(buffer.len); } } /// Ensures `capacity` more data can be buffered without rebasing. pub fn rebase(r: *Reader, capacity: usize) RebaseError!void { if (r.end + capacity <= r.buffer.len) return; return r.vtable.rebase(r, capacity); } pub fn defaultRebase(r: *Reader, capacity: usize) RebaseError!void { if (r.end <= r.buffer.len - capacity) return; const data = r.buffer[r.seek..r.end]; @memmove(r.buffer[0..data.len], data); r.seek = 0; r.end = data.len; } test fixed { var r: Reader = .fixed("a\x02"); try testing.expect((try r.takeByte()) == 'a'); try testing.expect((try r.takeEnum(enum(u8) { a = 0, b = 99, c = 2, d = 3, }, builtin.cpu.arch.endian())) == .c); try testing.expectError(error.EndOfStream, r.takeByte()); } test peek { var r: Reader = .fixed("abc"); try testing.expectEqualStrings("ab", try r.peek(2)); try testing.expectEqualStrings("a", try r.peek(1)); } test peekGreedy { var r: Reader = .fixed("abc"); try testing.expectEqualStrings("abc", try r.peekGreedy(1)); } test toss { var r: Reader = .fixed("abc"); r.toss(1); try testing.expectEqualStrings("bc", r.buffered()); } test take { var r: Reader = .fixed("abc"); try testing.expectEqualStrings("ab", try r.take(2)); try testing.expectEqualStrings("c", try r.take(1)); } test takeArray { var r: Reader = .fixed("abc"); try testing.expectEqualStrings("ab", try r.takeArray(2)); try testing.expectEqualStrings("c", try r.takeArray(1)); } test peekArray { var r: Reader = .fixed("abc"); try testing.expectEqualStrings("ab", try r.peekArray(2)); try testing.expectEqualStrings("a", try r.peekArray(1)); } test discardAll { var r: Reader = .fixed("foobar"); try r.discardAll(3); try testing.expectEqualStrings("bar", try r.take(3)); try r.discardAll(0); try testing.expectError(error.EndOfStream, r.discardAll(1)); } test discardRemaining { var r: Reader = .fixed("foobar"); r.toss(1); try testing.expectEqual(5, try r.discardRemaining()); try testing.expectEqual(0, try r.discardRemaining()); } test stream { var out_buffer: [10]u8 = undefined; var r: Reader = .fixed("foobar"); var w: Writer = .fixed(&out_buffer); // Short streams are possible with this function but not with fixed. 
try testing.expectEqual(2, try r.stream(&w, .limited(2))); try testing.expectEqualStrings("fo", w.buffered()); try testing.expectEqual(4, try r.stream(&w, .unlimited)); try testing.expectEqualStrings("foobar", w.buffered()); } test takeSentinel { var r: Reader = .fixed("ab\nc"); try testing.expectEqualStrings("ab", try r.takeSentinel('\n')); try testing.expectError(error.EndOfStream, r.takeSentinel('\n')); try testing.expectEqualStrings("c", try r.peek(1)); } test peekSentinel { var r: Reader = .fixed("ab\nc"); try testing.expectEqualStrings("ab", try r.peekSentinel('\n')); try testing.expectEqualStrings("ab", try r.peekSentinel('\n')); } test takeDelimiterInclusive { var r: Reader = .fixed("ab\nc"); try testing.expectEqualStrings("ab\n", try r.takeDelimiterInclusive('\n')); try testing.expectError(error.EndOfStream, r.takeDelimiterInclusive('\n')); } test peekDelimiterInclusive { var r: Reader = .fixed("ab\nc"); try testing.expectEqualStrings("ab\n", try r.peekDelimiterInclusive('\n')); try testing.expectEqualStrings("ab\n", try r.peekDelimiterInclusive('\n')); r.toss(3); try testing.expectError(error.EndOfStream, r.peekDelimiterInclusive('\n')); } test takeDelimiterExclusive { var r: Reader = .fixed("ab\nc"); try testing.expectEqualStrings("ab", try r.takeDelimiterExclusive('\n')); try testing.expectEqualStrings("c", try r.takeDelimiterExclusive('\n')); try testing.expectError(error.EndOfStream, r.takeDelimiterExclusive('\n')); } test peekDelimiterExclusive { var r: Reader = .fixed("ab\nc"); try testing.expectEqualStrings("ab", try r.peekDelimiterExclusive('\n')); try testing.expectEqualStrings("ab", try r.peekDelimiterExclusive('\n')); r.toss(3); try testing.expectEqualStrings("c", try r.peekDelimiterExclusive('\n')); } test streamDelimiter { var out_buffer: [10]u8 = undefined; var r: Reader = .fixed("foo\nbars"); var w: Writer = .fixed(&out_buffer); try testing.expectEqual(3, try r.streamDelimiter(&w, '\n')); try testing.expectEqualStrings("foo", w.buffered()); try testing.expectEqual(0, try r.streamDelimiter(&w, '\n')); r.toss(1); try testing.expectError(error.EndOfStream, r.streamDelimiter(&w, '\n')); } test streamDelimiterEnding { var out_buffer: [10]u8 = undefined; var r: Reader = .fixed("foo\nbars"); var w: Writer = .fixed(&out_buffer); try testing.expectEqual(3, try r.streamDelimiterEnding(&w, '\n')); try testing.expectEqualStrings("foo", w.buffered()); r.toss(1); try testing.expectEqual(4, try r.streamDelimiterEnding(&w, '\n')); try testing.expectEqualStrings("foobars", w.buffered()); try testing.expectEqual(0, try r.streamDelimiterEnding(&w, '\n')); try testing.expectEqual(0, try r.streamDelimiterEnding(&w, '\n')); } test streamDelimiterLimit { var out_buffer: [10]u8 = undefined; var r: Reader = .fixed("foo\nbars"); var w: Writer = .fixed(&out_buffer); try testing.expectError(error.StreamTooLong, r.streamDelimiterLimit(&w, '\n', .limited(2))); try testing.expectEqual(1, try r.streamDelimiterLimit(&w, '\n', .limited(3))); try testing.expectEqualStrings("\n", try r.take(1)); try testing.expectEqual(4, try r.streamDelimiterLimit(&w, '\n', .unlimited)); try testing.expectEqualStrings("foobars", w.buffered()); } test discardDelimiterExclusive { var r: Reader = .fixed("foob\nar"); try testing.expectEqual(4, try r.discardDelimiterExclusive('\n')); try testing.expectEqualStrings("\n", try r.take(1)); try testing.expectEqual(2, try r.discardDelimiterExclusive('\n')); try testing.expectEqual(0, try r.discardDelimiterExclusive('\n')); } test discardDelimiterInclusive { var r: Reader = 
.fixed("foob\nar"); try testing.expectEqual(5, try r.discardDelimiterInclusive('\n')); try testing.expectError(error.EndOfStream, r.discardDelimiterInclusive('\n')); } test discardDelimiterLimit { var r: Reader = .fixed("foob\nar"); try testing.expectError(error.StreamTooLong, r.discardDelimiterLimit('\n', .limited(4))); try testing.expectEqual(0, try r.discardDelimiterLimit('\n', .limited(2))); try testing.expectEqualStrings("\n", try r.take(1)); try testing.expectEqual(2, try r.discardDelimiterLimit('\n', .unlimited)); try testing.expectEqual(0, try r.discardDelimiterLimit('\n', .unlimited)); } test fill { var r: Reader = .fixed("abc"); try r.fill(1); try r.fill(3); } test takeByte { var r: Reader = .fixed("ab"); try testing.expectEqual('a', try r.takeByte()); try testing.expectEqual('b', try r.takeByte()); try testing.expectError(error.EndOfStream, r.takeByte()); } test takeByteSigned { var r: Reader = .fixed(&.{ 255, 5 }); try testing.expectEqual(-1, try r.takeByteSigned()); try testing.expectEqual(5, try r.takeByteSigned()); try testing.expectError(error.EndOfStream, r.takeByteSigned()); } test takeInt { var r: Reader = .fixed(&.{ 0x12, 0x34, 0x56 }); try testing.expectEqual(0x1234, try r.takeInt(u16, .big)); try testing.expectError(error.EndOfStream, r.takeInt(u16, .little)); } test takeVarInt { var r: Reader = .fixed(&.{ 0x12, 0x34, 0x56 }); try testing.expectEqual(0x123456, try r.takeVarInt(u64, .big, 3)); try testing.expectError(error.EndOfStream, r.takeVarInt(u16, .little, 1)); } test takeStructPointer { var r: Reader = .fixed(&.{ 0x12, 0x00, 0x34, 0x56 }); const S = extern struct { a: u8, b: u16 }; switch (native_endian) { .little => try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x5634 }), (try r.takeStructPointer(S)).*), .big => try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x3456 }), (try r.takeStructPointer(S)).*), } try testing.expectError(error.EndOfStream, r.takeStructPointer(S)); } test peekStructPointer { var r: Reader = .fixed(&.{ 0x12, 0x00, 0x34, 0x56 }); const S = extern struct { a: u8, b: u16 }; switch (native_endian) { .little => { try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x5634 }), (try r.peekStructPointer(S)).*); try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x5634 }), (try r.peekStructPointer(S)).*); }, .big => { try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x3456 }), (try r.peekStructPointer(S)).*); try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x3456 }), (try r.peekStructPointer(S)).*); }, } } test takeStruct { var r: Reader = .fixed(&.{ 0x12, 0x00, 0x34, 0x56 }); const S = extern struct { a: u8, b: u16 }; try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x3456 }), try r.takeStruct(S, .big)); try testing.expectError(error.EndOfStream, r.takeStruct(S, .little)); } test peekStruct { var r: Reader = .fixed(&.{ 0x12, 0x00, 0x34, 0x56 }); const S = extern struct { a: u8, b: u16 }; try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x3456 }), try r.peekStruct(S, .big)); try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x5634 }), try r.peekStruct(S, .little)); } test takeEnum { var r: Reader = .fixed(&.{ 2, 0, 1 }); const E1 = enum(u8) { a, b, c }; const E2 = enum(u16) { _ }; try testing.expectEqual(E1.c, try r.takeEnum(E1, .little)); try testing.expectEqual(@as(E2, @enumFromInt(0x0001)), try r.takeEnum(E2, .big)); } test takeLeb128 { var r: Reader = .fixed("\xc7\x9f\x7f\x80"); try testing.expectEqual(-12345, try r.takeLeb128(i64)); try testing.expectEqual(0x80, try r.peekByte()); try testing.expectError(error.EndOfStream, 
test readSliceShort {
    var r: Reader = .fixed("HelloFren");
    var buf: [5]u8 = undefined;
    try testing.expectEqual(5, try r.readSliceShort(&buf));
    try testing.expectEqualStrings("Hello", buf[0..5]);
    try testing.expectEqual(4, try r.readSliceShort(&buf));
    try testing.expectEqualStrings("Fren", buf[0..4]);
    try testing.expectEqual(0, try r.readSliceShort(&buf));
}

test "readSliceShort with smaller buffer than Reader" {
    var reader_buf: [15]u8 = undefined;
    const str = "This is a test";
    var one_byte_stream: testing.Reader = .init(&reader_buf, &.{
        .{ .buffer = str },
    });
    one_byte_stream.artificial_limit = .limited(1);
    var buf: [14]u8 = undefined;
    try testing.expectEqual(14, try one_byte_stream.interface.readSliceShort(&buf));
    try testing.expectEqualStrings(str, &buf);
}

test readVec {
    var r: Reader = .fixed(std.ascii.letters);
    var flat_buffer: [52]u8 = undefined;
    var bufs: [2][]u8 = .{
        flat_buffer[0..26],
        flat_buffer[26..],
    };
    // Short reads are possible with this function, but not with `fixed`.
    try testing.expectEqual(26 * 2, try r.readVec(&bufs));
    try testing.expectEqualStrings(std.ascii.letters[0..26], bufs[0]);
    try testing.expectEqualStrings(std.ascii.letters[26..], bufs[1]);
}

test "expected error.EndOfStream" {
    // Unit test inspired by https://github.com/ziglang/zig/issues/17733
    var buffer: [3]u8 = undefined;
    var r: std.io.Reader = .fixed(&buffer);
    r.end = 0; // Capacity 3, but empty.
    try std.testing.expectError(error.EndOfStream, r.takeEnum(enum(u8) { a, b }, .little));
    try std.testing.expectError(error.EndOfStream, r.take(3));
}

test "readVec at end" {
    var reader_buffer: [8]u8 = "abcd1234".*;
    var reader: testing.Reader = .init(&reader_buffer, &.{});
    reader.interface.end = reader_buffer.len;
    var out: [16]u8 = undefined;
    var vecs: [1][]u8 = .{&out};
    try testing.expectEqual(8, try reader.interface.readVec(&vecs));
    try testing.expectEqualStrings("abcd1234", vecs[0][0..8]);
}

// Vtable stubs for a reader that is always at end of stream.
fn endingStream(r: *Reader, w: *Writer, limit: Limit) StreamError!usize {
    _ = r;
    _ = w;
    _ = limit;
    return error.EndOfStream;
}

fn endingReadVec(r: *Reader, data: [][]u8) Error!usize {
    _ = r;
    _ = data;
    return error.EndOfStream;
}

fn endingDiscard(r: *Reader, limit: Limit) Error!usize {
    _ = r;
    _ = limit;
    return error.EndOfStream;
}

fn endingRebase(r: *Reader, capacity: usize) RebaseError!void {
    _ = r;
    _ = capacity;
    return error.EndOfStream;
}

// Vtable stubs for a reader whose operations always fail.
fn failingStream(r: *Reader, w: *Writer, limit: Limit) StreamError!usize {
    _ = r;
    _ = w;
    _ = limit;
    return error.ReadFailed;
}

fn failingDiscard(r: *Reader, limit: Limit) Error!usize {
    _ = r;
    _ = limit;
    return error.ReadFailed;
}

/// Adapts this `Reader` to the old `std.Io.AnyReader` interface.
pub fn adaptToOldInterface(r: *Reader) std.Io.AnyReader {
    return .{ .context = r, .readFn = derpRead };
}

fn derpRead(context: *const anyopaque, buffer: []u8) anyerror!usize {
    const r: *Reader = @ptrCast(@alignCast(@constCast(context)));
    return r.readSliceShort(buffer);
}
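// Illustrative usage sketch, not part of the original file: bridging a new
// `Reader` to code that still consumes the old `std.Io.AnyReader` interface.
// Assumes the old interface's `readAll` semantics (read until the buffer is
// full or the stream ends).
test adaptToOldInterface {
    var r: Reader = .fixed("abc");
    const old_reader = adaptToOldInterface(&r);
    var buf: [3]u8 = undefined;
    try testing.expectEqual(3, try old_reader.readAll(&buf));
    try testing.expectEqualStrings("abc", &buf);
}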
test "readAlloc when the backing reader provides one byte at a time" {
    const str = "This is a test";
    var tiny_buffer: [1]u8 = undefined;
    var one_byte_stream: testing.Reader = .init(&tiny_buffer, &.{
        .{ .buffer = str },
    });
    one_byte_stream.artificial_limit = .limited(1);
    const res = try one_byte_stream.interface.allocRemaining(std.testing.allocator, .unlimited);
    defer std.testing.allocator.free(res);
    try std.testing.expectEqualStrings(str, res);
}

test "takeDelimiterInclusive when it rebases" {
    const written_line = "ABCDEFGHIJKLMNOPQRSTUVWXYZ\n";
    var buffer: [128]u8 = undefined;
    var tr: std.testing.Reader = .init(&buffer, &.{
        .{ .buffer = written_line },
        .{ .buffer = written_line },
        .{ .buffer = written_line },
        .{ .buffer = written_line },
        .{ .buffer = written_line },
        .{ .buffer = written_line },
    });
    const r = &tr.interface;
    for (0..6) |_| {
        try std.testing.expectEqualStrings(written_line, try r.takeDelimiterInclusive('\n'));
    }
}

test "takeStruct and peekStruct packed" {
    var r: Reader = .fixed(&.{ 0b11110000, 0b00110011 });
    const S = packed struct(u16) { a: u2, b: u6, c: u7, d: u1 };
    try testing.expectEqual(@as(S, .{
        .a = 0b11,
        .b = 0b001100,
        .c = 0b1110000,
        .d = 0b1,
    }), try r.peekStruct(S, .big));
    try testing.expectEqual(@as(S, .{
        .a = 0b11,
        .b = 0b001100,
        .c = 0b1110000,
        .d = 0b1,
    }), try r.takeStruct(S, .big));
    try testing.expectError(error.EndOfStream, r.takeStruct(S, .little));
}

/// Provides a `Reader` implementation by passing data from an underlying
/// reader through `Hasher.update`.
///
/// The underlying reader is best left unbuffered.
///
/// This implementation makes suboptimal buffering decisions due to being
/// generic. A better solution would involve creating a reader for each hash
/// function, with a discard buffer tailored to the hash implementation
/// details.
pub fn Hashed(comptime Hasher: type) type {
    return struct {
        in: *Reader,
        hasher: Hasher,
        reader: Reader,

        pub fn init(in: *Reader, hasher: Hasher, buffer: []u8) @This() {
            return .{
                .in = in,
                .hasher = hasher,
                .reader = .{
                    .vtable = &.{
                        .stream = @This().stream,
                        .readVec = @This().readVec,
                        .discard = @This().discard,
                    },
                    .buffer = buffer,
                    .end = 0,
                    .seek = 0,
                },
            };
        }

        fn stream(r: *Reader, w: *Writer, limit: Limit) StreamError!usize {
            const this: *@This() = @alignCast(@fieldParentPtr("reader", r));
            const data = limit.slice(try w.writableSliceGreedy(1));
            var vec: [1][]u8 = .{data};
            const n = try this.in.readVec(&vec);
            this.hasher.update(data[0..n]);
            w.advance(n);
            return n;
        }

        fn readVec(r: *Reader, data: [][]u8) Error!usize {
            const this: *@This() = @alignCast(@fieldParentPtr("reader", r));
            var vecs: [8][]u8 = undefined; // Arbitrarily chosen amount.
            const dest_n, const data_size = try r.writableVector(&vecs, data);
            const dest = vecs[0..dest_n];
            const n = try this.in.readVec(dest);
            // Feed everything that was just read, wherever it landed, to the hasher.
            var remaining: usize = n;
            for (dest) |slice| {
                if (remaining < slice.len) {
                    this.hasher.update(slice[0..remaining]);
                    remaining = 0;
                    break;
                } else {
                    remaining -= slice.len;
                    this.hasher.update(slice);
                }
            }
            assert(remaining == 0);
            if (n > data_size) {
                r.end += n - data_size;
                return data_size;
            }
            return n;
        }

        fn discard(r: *Reader, limit: Limit) Error!usize {
            const this: *@This() = @alignCast(@fieldParentPtr("reader", r));
            const peeked = limit.slice(try this.in.peekGreedy(1));
            this.hasher.update(peeked);
            this.in.toss(peeked.len);
            return peeked.len;
        }
    };
}
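// Illustrative usage sketch, not part of the original file: hashing every
// byte read from an underlying stream. Assumes `std.crypto.hash.sha2.Sha256`
// as the `Hasher`; any type with a compatible `update` method fits the same
// shape.
test Hashed {
    const Sha256 = std.crypto.hash.sha2.Sha256;
    var in: Reader = .fixed("some bytes to hash");
    var hash_buffer: [64]u8 = undefined;
    var hashed: Hashed(Sha256) = .init(&in, Sha256.init(.{}), &hash_buffer);
    // Reading through `hashed.reader` feeds every byte to the hasher.
    var sink: [32]u8 = undefined;
    _ = try hashed.reader.readSliceShort(&sink);
    var digest: [Sha256.digest_length]u8 = undefined;
    hashed.hasher.final(&digest);
}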
/// Like `writableVector`, but fills `std.posix.iovec` entries for use with
/// POSIX vectored reads.
pub fn writableVectorPosix(
    r: *Reader,
    buffer: []std.posix.iovec,
    data: []const []u8,
) Error!struct { usize, usize } {
    var i: usize = 0;
    var n: usize = 0;
    if (r.seek == r.end) {
        for (data) |buf| {
            if (buffer.len - i == 0) return .{ i, n };
            if (buf.len != 0) {
                buffer[i] = .{ .base = buf.ptr, .len = buf.len };
                i += 1;
                n += buf.len;
            }
        }
        const buf = r.buffer;
        if (buf.len != 0) {
            r.seek = 0;
            r.end = 0;
            buffer[i] = .{ .base = buf.ptr, .len = buf.len };
            i += 1;
        }
    } else {
        const buf = r.buffer[r.end..];
        buffer[i] = .{ .base = buf.ptr, .len = buf.len };
        i += 1;
    }
    return .{ i, n };
}

/// Like `writableVector`, but fills `WSABUF` entries for Windows socket
/// reads, clamping each slice length to `maxInt(u32)`.
pub fn writableVectorWsa(
    r: *Reader,
    buffer: []std.os.windows.ws2_32.WSABUF,
    data: []const []u8,
) Error!struct { usize, usize } {
    var i: usize = 0;
    var n: usize = 0;
    if (r.seek == r.end) {
        for (data) |buf| {
            if (buffer.len - i == 0) return .{ i, n };
            if (buf.len == 0) continue;
            if (std.math.cast(u32, buf.len)) |len| {
                buffer[i] = .{ .buf = buf.ptr, .len = len };
                i += 1;
                n += len;
                continue;
            }
            buffer[i] = .{ .buf = buf.ptr, .len = std.math.maxInt(u32) };
            i += 1;
            n += std.math.maxInt(u32);
            return .{ i, n };
        }
        const buf = r.buffer;
        if (buf.len != 0) {
            r.seek = 0;
            r.end = 0;
            if (std.math.cast(u32, buf.len)) |len| {
                buffer[i] = .{ .buf = buf.ptr, .len = len };
            } else {
                buffer[i] = .{ .buf = buf.ptr, .len = std.math.maxInt(u32) };
            }
            i += 1;
        }
    } else {
        buffer[i] = .{
            .buf = r.buffer.ptr + r.end,
            .len = @min(std.math.maxInt(u32), r.buffer.len - r.end),
        };
        i += 1;
    }
    return .{ i, n };
}

/// Helper for `readVec` implementations. Fills `buffer` with destination
/// slices: the nonempty entries of `data` followed by `Reader.buffer`, or,
/// when the reader still holds unread buffered bytes, only the unused tail
/// of `Reader.buffer`, so that new data queues behind the buffered data.
///
/// Returns the number of slices populated along with the total capacity of
/// the `data` portion. Bytes received beyond that capacity belong to
/// `Reader.buffer`, and the caller must advance `end` accordingly (see the
/// `readVec` implementation in `Hashed` above).
pub fn writableVector(r: *Reader, buffer: [][]u8, data: []const []u8) Error!struct { usize, usize } {
    var i: usize = 0;
    var n: usize = 0;
    if (r.seek == r.end) {
        for (data) |buf| {
            if (buffer.len - i == 0) return .{ i, n };
            if (buf.len != 0) {
                buffer[i] = buf;
                i += 1;
                n += buf.len;
            }
        }
        if (r.buffer.len != 0) {
            r.seek = 0;
            r.end = 0;
            buffer[i] = r.buffer;
            i += 1;
        }
    } else {
        buffer[i] = r.buffer[r.end..];
        i += 1;
    }
    return .{ i, n };
}

test {
    _ = Limited;
}
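// Illustrative sketch, not part of the original file: the typical shape of a
// `readVec` implementation built on `writableVector`, here for a hypothetical
// reader that produces an endless stream of zeroes. It follows the same
// pattern as `Hashed.readVec` above.
fn zeroesReadVec(r: *Reader, data: [][]u8) Error!usize {
    var vecs: [8][]u8 = undefined;
    // Gather destinations: caller-provided `data` first, then `Reader.buffer`.
    const dest_n, const data_size = try r.writableVector(&vecs, data);
    var n: usize = 0;
    for (vecs[0..dest_n]) |dest| {
        @memset(dest, 0);
        n += dest.len;
    }
    if (n > data_size) {
        // The surplus was written into `Reader.buffer`; make it visible to
        // the reader and report only the bytes delivered directly to `data`.
        r.end += n - data_size;
        return data_size;
    }
    return n;
}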