Commit c7c35bf9e6

dweiller <4678790+dweiller@users.noreply.github.com>
2023-02-21 02:07:44
std.RingBuffer: add (non-concurrent) RingBuffer implementation
1 parent 1c518bd
Changed files (5)
lib/std/compress/zstandard/decode/block.zig
@@ -1,5 +1,6 @@
 const std = @import("std");
 const assert = std.debug.assert;
+const RingBuffer = std.RingBuffer;
 
 const types = @import("../types.zig");
 const frame = types.frame;
@@ -8,9 +9,6 @@ const LiteralsSection = types.compressed_block.LiteralsSection;
 const SequencesSection = types.compressed_block.SequencesSection;
 
 const huffman = @import("huffman.zig");
-
-const RingBuffer = @import("../RingBuffer.zig");
-
 const readers = @import("../readers.zig");
 
 const decodeFseTable = @import("fse.zig").decodeFseTable;
lib/std/compress/zstandard/decompress.zig
@@ -1,6 +1,7 @@
 const std = @import("std");
 const assert = std.debug.assert;
 const Allocator = std.mem.Allocator;
+const RingBuffer = std.RingBuffer;
 
 const types = @import("types.zig");
 const frame = types.frame;
@@ -12,8 +13,6 @@ const Table = types.compressed_block.Table;
 
 pub const block = @import("decode/block.zig");
 
-pub const RingBuffer = @import("RingBuffer.zig");
-
 const readers = @import("readers.zig");
 
 const readInt = std.mem.readIntLittle;
lib/std/compress/zstandard.zig
@@ -1,11 +1,11 @@
 const std = @import("std");
 const Allocator = std.mem.Allocator;
+const RingBuffer = std.RingBuffer;
 
 const types = @import("zstandard/types.zig");
 pub const frame = types.frame;
 pub const compressed_block = types.compressed_block;
 
-const RingBuffer = @import("zstandard/RingBuffer.zig");
 pub const decompress = @import("zstandard/decompress.zig");
 
 pub fn DecompressStream(
lib/std/compress/zstandard/RingBuffer.zig → lib/std/RingBuffer.zig
@@ -1,8 +1,13 @@
-//! This ring buffer stores read and write indices while being able to utilise the full
-//! backing slice by incrementing the indices modulo twice the slice's length and reducing
-//! indices modulo the slice's length on slice access. This means that whether the ring buffer
-//! if full or empty can be distinguised by looking at the different between the read and write
-//! indices without adding an extra boolean flag or having to reserve a slot in the buffer.
+//! This ring buffer stores read and write indices while being able to utilise
+//! the full backing slice by incrementing the indices modulo twice the slice's
+//! length and reducing indices modulo the slice's length on slice access. This
+//! means that whether the ring buffer is full or empty can be distinguished by
+//! looking at the difference between the read and write indices without adding
+//! an extra boolean flag or having to reserve a slot in the buffer.
+//!
+//! This ring buffer has not been implemented with thread safety in mind, and
+//! therefore should not be assumed to be suitable for use cases involving
+//! separate reader and writer threads.
 
 const Allocator = @import("std").mem.Allocator;
 const assert = @import("std").debug.assert;
@@ -15,7 +20,7 @@ write_index: usize,
 
 pub const Error = error{Full};
 
-/// Allocate a new `RingBuffer`
+/// Allocate a new `RingBuffer`; `deinit()` should be called to free the buffer.
 pub fn init(allocator: Allocator, capacity: usize) Allocator.Error!RingBuffer {
     const bytes = try allocator.alloc(u8, capacity);
     return RingBuffer{
@@ -25,7 +30,8 @@ pub fn init(allocator: Allocator, capacity: usize) Allocator.Error!RingBuffer {
     };
 }
 
-/// Free a `RingBuffer`
+/// Free the data backing a `RingBuffer`; must be passed the same `Allocator` as
+/// `init()`.
 pub fn deinit(self: *RingBuffer, allocator: Allocator) void {
     allocator.free(self.data);
     self.* = undefined;
@@ -36,7 +42,7 @@ pub fn mask(self: RingBuffer, index: usize) usize {
     return index % self.data.len;
 }
 
-/// Returns `index` module twice the length of the backing slice.
+/// Returns `index` modulo twice the length of the backing slice.
 pub fn mask2(self: RingBuffer, index: usize) usize {
     return index % (2 * self.data.len);
 }
@@ -55,7 +61,7 @@ pub fn writeAssumeCapacity(self: *RingBuffer, byte: u8) void {
     self.write_index = self.mask2(self.write_index + 1);
 }
 
-/// Write `bytes` into the ring bufffer. Returns `error.Full` if the ring
+/// Write `bytes` into the ring buffer. Returns `error.Full` if the ring
 /// buffer does not have enough space, without writing any data.
 pub fn writeSlice(self: *RingBuffer, bytes: []const u8) Error!void {
     if (self.len() + bytes.len > self.data.len) return error.Full;
@@ -72,6 +78,13 @@ pub fn writeSliceAssumeCapacity(self: *RingBuffer, bytes: []const u8) void {
 /// ring buffer is empty.
 pub fn read(self: *RingBuffer) ?u8 {
     if (self.isEmpty()) return null;
+    return self.readAssumeLength();
+}
+
+/// Consume a byte from the ring buffer and return it; asserts that the buffer
+/// is not empty.
+pub fn readAssumeLength(self: *RingBuffer) u8 {
+    assert(!self.isEmpty());
     const byte = self.data[self.mask(self.read_index)];
     self.read_index = self.mask2(self.read_index + 1);
     return byte;
@@ -95,15 +108,15 @@ pub fn len(self: RingBuffer) usize {
 }
 
 /// A `Slice` represents a region of a ring buffer. The region is split into two
-/// sections as the ring buffer data will not be contiguous if the desired region
-/// wraps to the start of the backing slice.
+/// sections as the ring buffer data will not be contiguous if the desired
+/// region wraps to the start of the backing slice.
 pub const Slice = struct {
     first: []u8,
     second: []u8,
 };
 
-/// Returns a `Slice` for the region of the ring buffer staring at `self.mask(start_unmasked)`
-/// with the specified length.
+/// Returns a `Slice` for the region of the ring buffer starting at
+/// `self.mask(start_unmasked)` with the specified length.
 pub fn sliceAt(self: RingBuffer, start_unmasked: usize, length: usize) Slice {
     assert(length <= self.data.len);
     const slice1_start = self.mask(start_unmasked);
@@ -117,6 +130,7 @@ pub fn sliceAt(self: RingBuffer, start_unmasked: usize, length: usize) Slice {
 }
 
 /// Returns a `Slice` for the last `length` bytes written to the ring buffer.
+/// Does not check that any bytes have been written into the region.
 pub fn sliceLast(self: RingBuffer, length: usize) Slice {
     return self.sliceAt(self.write_index + self.data.len - length, length);
 }
lib/std/std.zig
@@ -31,6 +31,7 @@ pub const PackedIntSliceEndian = @import("packed_int_array.zig").PackedIntSliceE
 pub const PriorityQueue = @import("priority_queue.zig").PriorityQueue;
 pub const PriorityDequeue = @import("priority_dequeue.zig").PriorityDequeue;
 pub const Progress = @import("Progress.zig");
+pub const RingBuffer = @import("RingBuffer.zig");
 pub const SegmentedList = @import("segmented_list.zig").SegmentedList;
 pub const SemanticVersion = @import("SemanticVersion.zig");
 pub const SinglyLinkedList = @import("linked_list.zig").SinglyLinkedList;