Commit 2d35c16ee7

dweiller <4678790+dweiller@users.noreply.github.com>
2023-01-31 02:54:05
std.compress.zstandard: add init/deinit for ring buffer, fix len()
1 parent e92575d
Changed files (1)
lib/std/compress/zstandard/RingBuffer.zig
@@ -4,6 +4,7 @@
 //! if full or empty can be distinguished by looking at the difference between the read and write
 //! indices without adding an extra boolean flag or having to reserve a slot in the buffer.
 
+const Allocator = @import("std").mem.Allocator;
 const assert = @import("std").debug.assert;
 
 const RingBuffer = @This();
@@ -12,6 +13,22 @@ data: []u8,
 read_index: usize,
 write_index: usize,
 
+/// Allocate a new `RingBuffer`
+pub fn init(allocator: Allocator, capacity: usize) Allocator.Error!RingBuffer {
+    const bytes = try allocator.alloc(u8, capacity);
+    return RingBuffer{
+        .data = bytes,
+        .write_index = 0,
+        .read_index = 0,
+    };
+}
+
+/// Free a `RingBuffer`
+pub fn deinit(self: *RingBuffer, allocator: Allocator) void {
+    allocator.free(self.data);
+    self.* = undefined;
+}
+
 /// Returns `index` modulo the length of the backing slice.
 pub fn mask(self: RingBuffer, index: usize) usize {
     return index % self.data.len;
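
The new init/deinit pair gives the ring buffer an allocator-managed lifecycle: init allocates the backing slice and starts with both indices at zero, deinit frees the slice and sets the struct to undefined. A minimal usage sketch under assumed conditions follows; the import path `RingBuffer.zig`, the use of `std.testing.allocator`, and the capacity of 8 are illustrative choices, not part of the commit.

```zig
// Minimal sketch of the init/deinit lifecycle; the import path, allocator,
// and capacity are assumptions made for illustration.
const std = @import("std");
const RingBuffer = @import("RingBuffer.zig");

test "init allocates and deinit frees the backing slice" {
    var buffer = try RingBuffer.init(std.testing.allocator, 8);
    defer buffer.deinit(std.testing.allocator);

    // init stores the allocated slice and begins with read_index == write_index,
    // i.e. an empty buffer.
    try std.testing.expectEqual(@as(usize, 8), buffer.data.len);
    try std.testing.expectEqual(buffer.read_index, buffer.write_index);
}
```
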
@@ -70,7 +87,7 @@ pub fn isFull(self: RingBuffer) bool {
 
 /// Returns the number of bytes currently held in the buffer.
 pub fn len(self: RingBuffer) usize {
-    const adjusted_write_index = self.write_index + @boolToInt(self.write_index < self.read_index) * 2 * self.data.len;
+    const adjusted_write_index = self.write_index + @as(usize, @boolToInt(self.write_index < self.read_index)) * 2 * self.data.len;
     return adjusted_write_index - self.read_index;
 }
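
The len() change widens the `u1` returned by `@boolToInt` to `usize` before it is multiplied by `2 * self.data.len`, so the adjustment term is computed at full pointer width. Per the doc comment at the top of the file, the indices run over a range of twice the capacity: a full buffer and an empty buffer mask to the same slot, but len() tells them apart. A sketch of that wrap-around case, with hand-set index values (read_index = 6, write_index = 2, capacity 4) chosen purely to simulate a wrapped, full buffer:

```zig
// Sketch of the full-vs-empty distinction that len() computes; the import
// path, allocator, capacity, and hand-set indices are illustrative.
const std = @import("std");
const RingBuffer = @import("RingBuffer.zig");

test "len distinguishes a full buffer from an empty one" {
    var buffer = try RingBuffer.init(std.testing.allocator, 4);
    defer buffer.deinit(std.testing.allocator);

    // Empty: read_index == write_index, so len() is 0.
    try std.testing.expectEqual(@as(usize, 0), buffer.len());

    // Full after wrap-around: both indices mask to the same slot, but
    // write_index < read_index, so len() adds 2 * capacity before
    // subtracting: 2 + 2 * 4 - 6 == 4 == capacity.
    buffer.read_index = 6;
    buffer.write_index = 2;
    try std.testing.expectEqual(@as(usize, 4), buffer.len());
    try std.testing.expectEqual(buffer.mask(buffer.read_index), buffer.mask(buffer.write_index));
}
```
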