Commit a4f05a4588

Andrew Kelley <andrew@ziglang.org>
2025-07-27 19:25:46
delete flate implementation
1 parent 83513ad
Changed files (58)
lib
std
compress
flate
testdata
lib/std/compress/flate/testdata/block_writer/huffman-null-max.dyn.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-null-max.dyn.expect-noinput
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-null-max.huff.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-null-max.input
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-null-max.wb.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-null-max.wb.expect-noinput
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-pi.dyn.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-pi.dyn.expect-noinput
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-pi.huff.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-pi.input
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-pi.wb.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-pi.wb.expect-noinput
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-rand-1k.dyn.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-rand-1k.dyn.expect-noinput
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-rand-1k.huff.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-rand-1k.input
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-rand-1k.wb.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-rand-1k.wb.expect-noinput
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-rand-limit.dyn.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-rand-limit.dyn.expect-noinput
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-rand-limit.huff.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-rand-limit.input
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-rand-limit.wb.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-rand-limit.wb.expect-noinput
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-rand-max.huff.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-rand-max.input
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-shifts.dyn.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-shifts.dyn.expect-noinput
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-shifts.huff.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-shifts.input
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-shifts.wb.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-shifts.wb.expect-noinput
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-text-shift.dyn.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-text-shift.dyn.expect-noinput
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-text-shift.huff.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-text-shift.input
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-text-shift.wb.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-text-shift.wb.expect-noinput
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-text.dyn.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-text.dyn.expect-noinput
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-text.huff.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-text.input
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-text.wb.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-text.wb.expect-noinput
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-zero.dyn.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-zero.dyn.expect-noinput
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-zero.huff.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-zero.input
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-zero.wb.expect
Binary file
lib/std/compress/flate/testdata/block_writer/huffman-zero.wb.expect-noinput
Binary file
lib/std/compress/flate/testdata/block_writer/null-long-match.dyn.expect-noinput
Binary file
lib/std/compress/flate/testdata/block_writer/null-long-match.wb.expect-noinput
Binary file
lib/std/compress/flate/testdata/block_writer.zig
Binary file
lib/std/compress/flate/BlockWriter.zig
@@ -8,32 +8,33 @@ const Writer = std.io.Writer;
 const BlockWriter = @This();
 const flate = @import("../flate.zig");
 const Compress = flate.Compress;
-const huffman = flate.huffman;
+const HuffmanEncoder = flate.HuffmanEncoder;
 const Token = @import("Token.zig");
 
-const codegen_order = huffman.codegen_order;
+const codegen_order = HuffmanEncoder.codegen_order;
 const end_code_mark = 255;
 
 output: *Writer,
 
-codegen_freq: [huffman.codegen_code_count]u16 = undefined,
-literal_freq: [huffman.max_num_lit]u16 = undefined,
-distance_freq: [huffman.distance_code_count]u16 = undefined,
-codegen: [huffman.max_num_lit + huffman.distance_code_count + 1]u8 = undefined,
-literal_encoding: Compress.LiteralEncoder = .{},
-distance_encoding: Compress.DistanceEncoder = .{},
-codegen_encoding: Compress.CodegenEncoder = .{},
-fixed_literal_encoding: Compress.LiteralEncoder,
-fixed_distance_encoding: Compress.DistanceEncoder,
-huff_distance: Compress.DistanceEncoder,
-
-pub fn init(output: *Writer) BlockWriter {
-    return .{
-        .output = output,
-        .fixed_literal_encoding = Compress.fixedLiteralEncoder(),
-        .fixed_distance_encoding = Compress.fixedDistanceEncoder(),
-        .huff_distance = Compress.huffmanDistanceEncoder(),
-    };
+codegen_freq: [HuffmanEncoder.codegen_code_count]u16,
+literal_freq: [HuffmanEncoder.max_num_lit]u16,
+distance_freq: [HuffmanEncoder.distance_code_count]u16,
+codegen: [HuffmanEncoder.max_num_lit + HuffmanEncoder.distance_code_count + 1]u8,
+literal_encoding: HuffmanEncoder,
+distance_encoding: HuffmanEncoder,
+codegen_encoding: HuffmanEncoder,
+fixed_literal_encoding: HuffmanEncoder,
+fixed_distance_encoding: HuffmanEncoder,
+huff_distance: HuffmanEncoder,
+
+fixed_literal_codes: [HuffmanEncoder.max_num_frequencies]HuffmanEncoder.Code,
+fixed_distance_codes: [HuffmanEncoder.distance_code_count]HuffmanEncoder.Code,
+distance_codes: [HuffmanEncoder.distance_code_count]HuffmanEncoder.Code,
+
+pub fn init(bw: *BlockWriter) void {
+    bw.fixed_literal_encoding = .fixedLiteralEncoder(&bw.fixed_literal_codes);
+    bw.fixed_distance_encoding = .fixedDistanceEncoder(&bw.fixed_distance_codes);
+    bw.huff_distance = .huffmanDistanceEncoder(&bw.distance_codes);
 }
 
 /// Flush internal bit buffer to the writer.
@@ -46,27 +47,23 @@ pub fn flush(self: *BlockWriter) Writer.Error!void {
     try self.bit_writer.flush();
 }
 
-pub fn setWriter(self: *BlockWriter, new_writer: *Writer) void {
-    self.bit_writer.setWriter(new_writer);
-}
-
 fn writeCode(self: *BlockWriter, c: Compress.HuffCode) Writer.Error!void {
     try self.bit_writer.writeBits(c.code, c.len);
 }
 
-// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
-// the literal and distance lengths arrays (which are concatenated into a single
-// array).  This method generates that run-length encoding.
-//
-// The result is written into the codegen array, and the frequencies
-// of each code is written into the codegen_freq array.
-// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
-// information. Code bad_code is an end marker
-//
-// num_literals: The number of literals in literal_encoding
-// num_distances: The number of distances in distance_encoding
-// lit_enc: The literal encoder to use
-// dist_enc: The distance encoder to use
+/// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
+/// the literal and distance lengths arrays (which are concatenated into a single
+/// array).  This method generates that run-length encoding.
+///
+/// The result is written into the codegen array, and the frequencies
+/// of each code is written into the codegen_freq array.
+/// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
+/// information. Code bad_code is an end marker
+///
+/// num_literals: The number of literals in literal_encoding
+/// num_distances: The number of distances in distance_encoding
+/// lit_enc: The literal encoder to use
+/// dist_enc: The distance encoder to use
 fn generateCodegen(
     self: *BlockWriter,
     num_literals: u32,
@@ -167,7 +164,7 @@ const DynamicSize = struct {
     num_codegens: u32,
 };
 
-// dynamicSize returns the size of dynamically encoded data in bits.
+/// dynamicSize returns the size of dynamically encoded data in bits.
 fn dynamicSize(
     self: *BlockWriter,
     lit_enc: *Compress.LiteralEncoder, // literal encoder
@@ -194,7 +191,7 @@ fn dynamicSize(
     };
 }
 
-// fixedSize returns the size of dynamically encoded data in bits.
+/// fixedSize returns the size of fixed Huffman encoded data in bits.
 fn fixedSize(self: *BlockWriter, extra_bits: u32) u32 {
     return 3 +
         self.fixed_literal_encoding.bitLength(&self.literal_freq) +
@@ -207,25 +204,25 @@ const StoredSize = struct {
     storable: bool,
 };
 
-// storedSizeFits calculates the stored size, including header.
-// The function returns the size in bits and whether the block
-// fits inside a single block.
+/// storedSizeFits calculates the stored size, including header.
+/// The function returns the size in bits and whether the block
+/// fits inside a single block.
 fn storedSizeFits(in: ?[]const u8) StoredSize {
     if (in == null) {
         return .{ .size = 0, .storable = false };
     }
-    if (in.?.len <= huffman.max_store_block_size) {
+    if (in.?.len <= HuffmanEncoder.max_store_block_size) {
         return .{ .size = @as(u32, @intCast((in.?.len + 5) * 8)), .storable = true };
     }
     return .{ .size = 0, .storable = false };
 }
 
-// Write the header of a dynamic Huffman block to the output stream.
-//
-//  num_literals: The number of literals specified in codegen
-//  num_distances: The number of distances specified in codegen
-//  num_codegens: The number of codegens used in codegen
-//  eof: Is it the end-of-file? (end of stream)
+/// Write the header of a dynamic Huffman block to the output stream.
+///
+///  num_literals: The number of literals specified in codegen
+///  num_distances: The number of distances specified in codegen
+///  num_codegens: The number of codegens used in codegen
+///  eof: Is it the end-of-file? (end of stream)
 fn dynamicHeader(
     self: *BlockWriter,
     num_literals: u32,
@@ -291,11 +288,11 @@ fn fixedHeader(self: *BlockWriter, eof: bool) Writer.Error!void {
     try self.bit_writer.writeBits(value, 3);
 }
 
-// Write a block of tokens with the smallest encoding. Will choose block type.
-// The original input can be supplied, and if the huffman encoded data
-// is larger than the original bytes, the data will be written as a
-// stored block.
-// If the input is null, the tokens will always be Huffman encoded.
+/// Write a block of tokens with the smallest encoding. Will choose block type.
+/// The original input can be supplied, and if the huffman encoded data
+/// is larger than the original bytes, the data will be written as a
+/// stored block.
+/// If the input is null, the tokens will always be Huffman encoded.
 pub fn write(self: *BlockWriter, tokens: []const Token, eof: bool, input: ?[]const u8) Writer.Error!void {
     const lit_and_dist = self.indexTokens(tokens);
     const num_literals = lit_and_dist.num_literals;
@@ -379,11 +376,11 @@ pub fn storedBlock(self: *BlockWriter, input: []const u8, eof: bool) Writer.Erro
     try self.bit_writer.writeBytes(input);
 }
 
-// writeBlockDynamic encodes a block using a dynamic Huffman table.
-// This should be used if the symbols used have a disproportionate
-// histogram distribution.
-// If input is supplied and the compression savings are below 1/16th of the
-// input size the block is stored.
+/// writeBlockDynamic encodes a block using a dynamic Huffman table.
+/// This should be used if the symbols used have a disproportionate
+/// histogram distribution.
+/// If input is supplied and the compression savings are below 1/16th of the
+/// input size the block is stored.
 fn dynamicBlock(
     self: *BlockWriter,
     tokens: []const Token,
@@ -429,10 +426,10 @@ const TotalIndexedTokens = struct {
     num_distances: u32,
 };
 
-// Indexes a slice of tokens followed by an end_block_marker, and updates
-// literal_freq and distance_freq, and generates literal_encoding
-// and distance_encoding.
-// The number of literal and distance tokens is returned.
+/// Indexes a slice of tokens followed by an end_block_marker, and updates
+/// literal_freq and distance_freq, and generates literal_encoding
+/// and distance_encoding.
+/// The number of literal and distance tokens is returned.
 fn indexTokens(self: *BlockWriter, tokens: []const Token) TotalIndexedTokens {
     var num_literals: u32 = 0;
     var num_distances: u32 = 0;
@@ -453,7 +450,7 @@ fn indexTokens(self: *BlockWriter, tokens: []const Token) TotalIndexedTokens {
         self.distance_freq[t.distanceCode()] += 1;
     }
     // add end_block_marker token at the end
-    self.literal_freq[huffman.end_block_marker] += 1;
+    self.literal_freq[HuffmanEncoder.end_block_marker] += 1;
 
     // get the number of literals
     num_literals = @as(u32, @intCast(self.literal_freq.len));
@@ -479,8 +476,8 @@ fn indexTokens(self: *BlockWriter, tokens: []const Token) TotalIndexedTokens {
     };
 }
 
-// Writes a slice of tokens to the output followed by and end_block_marker.
-// codes for literal and distance encoding must be supplied.
+/// Writes a slice of tokens to the output followed by an end_block_marker.
+/// Codes for literal and distance encoding must be supplied.
 fn writeTokens(
     self: *BlockWriter,
     tokens: []const Token,
@@ -508,18 +505,18 @@ fn writeTokens(
         }
     }
     // add end_block_marker at the end
-    try self.writeCode(le_codes[huffman.end_block_marker]);
+    try self.writeCode(le_codes[HuffmanEncoder.end_block_marker]);
 }
 
-// Encodes a block of bytes as either Huffman encoded literals or uncompressed bytes
-// if the results only gains very little from compression.
+/// Encodes a block of bytes as either Huffman encoded literals or uncompressed bytes
+/// if the result gains only very little from compression.
 pub fn huffmanBlock(self: *BlockWriter, input: []const u8, eof: bool) Writer.Error!void {
     // Add everything as literals
     histogram(input, &self.literal_freq);
 
-    self.literal_freq[huffman.end_block_marker] = 1;
+    self.literal_freq[HuffmanEncoder.end_block_marker] = 1;
 
-    const num_literals = huffman.end_block_marker + 1;
+    const num_literals = HuffmanEncoder.end_block_marker + 1;
     self.distance_freq[0] = 1;
     const num_distances = 1;
 
@@ -560,10 +557,9 @@ pub fn huffmanBlock(self: *BlockWriter, input: []const u8, eof: bool) Writer.Err
         const c = encoding[t];
         try self.bit_writer.writeBits(c.code, c.len);
     }
-    try self.writeCode(encoding[huffman.end_block_marker]);
+    try self.writeCode(encoding[HuffmanEncoder.end_block_marker]);
 }
 
-// histogram accumulates a histogram of b in h.
 fn histogram(b: []const u8, h: *[286]u16) void {
     // Clear histogram
     for (h, 0..) |_, i| {
@@ -575,122 +571,3 @@ fn histogram(b: []const u8, h: *[286]u16) void {
         lh[t] += 1;
     }
 }
-
-// tests
-const expect = std.testing.expect;
-const fmt = std.fmt;
-const testing = std.testing;
-const ArrayList = std.ArrayList;
-
-const TestCase = @import("testdata/block_writer.zig").TestCase;
-const testCases = @import("testdata/block_writer.zig").testCases;
-
-// tests if the writeBlock encoding has changed.
-test "write" {
-    inline for (0..testCases.len) |i| {
-        try testBlock(testCases[i], .write_block);
-    }
-}
-
-// tests if the writeBlockDynamic encoding has changed.
-test "dynamicBlock" {
-    inline for (0..testCases.len) |i| {
-        try testBlock(testCases[i], .write_dyn_block);
-    }
-}
-
-test "huffmanBlock" {
-    inline for (0..testCases.len) |i| {
-        try testBlock(testCases[i], .write_huffman_block);
-    }
-    try testBlock(.{
-        .tokens = &[_]Token{},
-        .input = "huffman-rand-max.input",
-        .want = "huffman-rand-max.{s}.expect",
-    }, .write_huffman_block);
-}
-
-const TestFn = enum {
-    write_block,
-    write_dyn_block, // write dynamic block
-    write_huffman_block,
-
-    fn to_s(self: TestFn) []const u8 {
-        return switch (self) {
-            .write_block => "wb",
-            .write_dyn_block => "dyn",
-            .write_huffman_block => "huff",
-        };
-    }
-
-    fn write(
-        comptime self: TestFn,
-        bw: anytype,
-        tok: []const Token,
-        input: ?[]const u8,
-        final: bool,
-    ) !void {
-        switch (self) {
-            .write_block => try bw.write(tok, final, input),
-            .write_dyn_block => try bw.dynamicBlock(tok, final, input),
-            .write_huffman_block => try bw.huffmanBlock(input.?, final),
-        }
-        try bw.flush();
-    }
-};
-
-// testBlock tests a block against its references
-//
-// size
-//  64K [file-name].input                  - input non compressed file
-// 8.1K [file-name].golden                 -
-//   78 [file-name].dyn.expect             - output with writeBlockDynamic
-//   78 [file-name].wb.expect              - output with writeBlock
-// 8.1K [file-name].huff.expect            - output with writeBlockHuff
-//   78 [file-name].dyn.expect-noinput     - output with writeBlockDynamic when input is null
-//   78 [file-name].wb.expect-noinput      - output with writeBlock when input is null
-//
-//   wb   - writeBlock
-//   dyn  - writeBlockDynamic
-//   huff - writeBlockHuff
-//
-fn testBlock(comptime tc: TestCase, comptime tfn: TestFn) !void {
-    if (tc.input.len != 0 and tc.want.len != 0) {
-        const want_name = comptime fmt.comptimePrint(tc.want, .{tfn.to_s()});
-        const input = @embedFile("testdata/block_writer/" ++ tc.input);
-        const want = @embedFile("testdata/block_writer/" ++ want_name);
-        try testWriteBlock(tfn, input, want, tc.tokens);
-    }
-
-    if (tfn == .write_huffman_block) {
-        return;
-    }
-
-    const want_name_no_input = comptime fmt.comptimePrint(tc.want_no_input, .{tfn.to_s()});
-    const want = @embedFile("testdata/block_writer/" ++ want_name_no_input);
-    try testWriteBlock(tfn, null, want, tc.tokens);
-}
-
-// Uses writer function `tfn` to write `tokens`, tests that we got `want` as output.
-fn testWriteBlock(comptime tfn: TestFn, input: ?[]const u8, want: []const u8, tokens: []const Token) !void {
-    var buf = ArrayList(u8).init(testing.allocator);
-    var bw: BlockWriter = .init(buf.writer());
-    try tfn.write(&bw, tokens, input, false);
-    var got = buf.items;
-    try testing.expectEqualSlices(u8, want, got); // expect writeBlock to yield expected result
-    try expect(got[0] & 0b0000_0001 == 0); // bfinal is not set
-    //
-    // Test if the writer produces the same output after reset.
-    buf.deinit();
-    buf = ArrayList(u8).init(testing.allocator);
-    defer buf.deinit();
-    bw.setWriter(buf.writer());
-
-    try tfn.write(&bw, tokens, input, true);
-    try bw.flush();
-    got = buf.items;
-
-    try expect(got[0] & 1 == 1); // bfinal is set
-    buf.items[0] &= 0b1111_1110; // remove bfinal bit, so we can run test slices
-    try testing.expectEqualSlices(u8, want, got); // expect writeBlock to yield expected result
-}
lib/std/compress/flate/Compress.zig
@@ -39,6 +39,7 @@
 //!
 //!
 //! Allocates statically ~400K (192K lookup, 128K tokens, 64K window).
+
 const builtin = @import("builtin");
 const std = @import("std");
 const assert = std.debug.assert;
@@ -47,7 +48,6 @@ const expect = testing.expect;
 const mem = std.mem;
 const math = std.math;
 const Writer = std.Io.Writer;
-const Reader = std.Io.Reader;
 
 const Compress = @This();
 const Token = @import("Token.zig");
@@ -55,22 +55,24 @@ const BlockWriter = @import("BlockWriter.zig");
 const flate = @import("../flate.zig");
 const Container = flate.Container;
 const Lookup = @import("Lookup.zig");
-const huffman = flate.huffman;
+const HuffmanEncoder = flate.HuffmanEncoder;
+const LiteralNode = HuffmanEncoder.LiteralNode;
 
 lookup: Lookup = .{},
 tokens: Tokens = .{},
-/// Asserted to have a buffer capacity of at least `flate.max_window_len`.
-input: *Reader,
 block_writer: BlockWriter,
 level: LevelArgs,
 hasher: Container.Hasher,
-reader: Reader,
+writer: Writer,
+state: State,
 
 // Match and literal at the previous position.
 // Used for lazy match finding in processWindow.
 prev_match: ?Token = null,
 prev_literal: ?u8 = null,
 
+pub const State = enum { header, middle, ended };
+
 /// Trades between speed and compression size.
 /// Starts with level 4: in [zlib](https://github.com/madler/zlib/blob/abd3d1a28930f89375d4b41408b39f6c1be157b2/deflate.c#L115C1-L117C43)
 /// levels 1-3 are using different algorithm to perform faster but with less
@@ -118,188 +120,34 @@ pub const Options = struct {
     container: Container = .raw,
 };
 
-pub fn init(input: *Reader, buffer: []u8, options: Options) Compress {
+pub fn init(output: *Writer, buffer: []u8, options: Options) Compress {
     return .{
-        .input = input,
-        .block_writer = undefined,
+        .block_writer = .{
+            .output = output,
+            .codegen_freq = undefined,
+            .literal_freq = undefined,
+            .distance_freq = undefined,
+            .codegen = undefined,
+            .literal_encoding = undefined,
+            .distance_encoding = undefined,
+            .codegen_encoding = undefined,
+            .fixed_literal_encoding = undefined,
+            .fixed_distance_encoding = undefined,
+            .huff_distance = undefined,
+            .fixed_literal_codes = undefined,
+            .fixed_distance_codes = undefined,
+            .distance_codes = undefined,
+        },
         .level = .get(options.level),
         .hasher = .init(options.container),
         .state = .header,
-        .reader = .{
+        .writer = .{
             .buffer = buffer,
-            .stream = stream,
+            .vtable = &.{ .drain = drain },
         },
     };
 }
 
-const FlushOption = enum { none, flush, final };
-
-/// Process data in window and create tokens. If token buffer is full
-/// flush tokens to the token writer.
-///
-/// Returns number of bytes consumed from `lh`.
-fn tokenizeSlice(c: *Compress, bw: *Writer, limit: std.Io.Limit, lh: []const u8) !usize {
-    _ = bw;
-    _ = limit;
-    if (true) @panic("TODO");
-    var step: u16 = 1; // 1 in the case of literal, match length otherwise
-    const pos: u16 = c.win.pos();
-    const literal = lh[0]; // literal at current position
-    const min_len: u16 = if (c.prev_match) |m| m.length() else 0;
-
-    // Try to find match at least min_len long.
-    if (c.findMatch(pos, lh, min_len)) |match| {
-        // Found better match than previous.
-        try c.addPrevLiteral();
-
-        // Is found match length good enough?
-        if (match.length() >= c.level.lazy) {
-            // Don't try to lazy find better match, use this.
-            step = try c.addMatch(match);
-        } else {
-            // Store this match.
-            c.prev_literal = literal;
-            c.prev_match = match;
-        }
-    } else {
-        // There is no better match at current pos then it was previous.
-        // Write previous match or literal.
-        if (c.prev_match) |m| {
-            // Write match from previous position.
-            step = try c.addMatch(m) - 1; // we already advanced 1 from previous position
-        } else {
-            // No match at previous position.
-            // Write previous literal if any, and remember this literal.
-            try c.addPrevLiteral();
-            c.prev_literal = literal;
-        }
-    }
-    // Advance window and add hashes.
-    c.windowAdvance(step, lh, pos);
-}
-
-fn windowAdvance(self: *Compress, step: u16, lh: []const u8, pos: u16) void {
-    // current position is already added in findMatch
-    self.lookup.bulkAdd(lh[1..], step - 1, pos + 1);
-    self.win.advance(step);
-}
-
-// Add previous literal (if any) to the tokens list.
-fn addPrevLiteral(self: *Compress) !void {
-    if (self.prev_literal) |l| try self.addToken(Token.initLiteral(l));
-}
-
-// Add match to the tokens list, reset prev pointers.
-// Returns length of the added match.
-fn addMatch(self: *Compress, m: Token) !u16 {
-    try self.addToken(m);
-    self.prev_literal = null;
-    self.prev_match = null;
-    return m.length();
-}
-
-fn addToken(self: *Compress, token: Token) !void {
-    self.tokens.add(token);
-    if (self.tokens.full()) try self.flushTokens(.none);
-}
-
-// Finds largest match in the history window with the data at current pos.
-fn findMatch(self: *Compress, pos: u16, lh: []const u8, min_len: u16) ?Token {
-    var len: u16 = min_len;
-    // Previous location with the same hash (same 4 bytes).
-    var prev_pos = self.lookup.add(lh, pos);
-    // Last found match.
-    var match: ?Token = null;
-
-    // How much back-references to try, performance knob.
-    var chain: usize = self.level.chain;
-    if (len >= self.level.good) {
-        // If we've got a match that's good enough, only look in 1/4 the chain.
-        chain >>= 2;
-    }
-
-    // Hot path loop!
-    while (prev_pos > 0 and chain > 0) : (chain -= 1) {
-        const distance = pos - prev_pos;
-        if (distance > flate.match.max_distance)
-            break;
-
-        const new_len = self.win.match(prev_pos, pos, len);
-        if (new_len > len) {
-            match = Token.initMatch(@intCast(distance), new_len);
-            if (new_len >= self.level.nice) {
-                // The match is good enough that we don't try to find a better one.
-                return match;
-            }
-            len = new_len;
-        }
-        prev_pos = self.lookup.prev(prev_pos);
-    }
-
-    return match;
-}
-
-fn flushTokens(self: *Compress, flush_opt: FlushOption) !void {
-    // Pass tokens to the token writer
-    try self.block_writer.write(self.tokens.tokens(), flush_opt == .final, self.win.tokensBuffer());
-    // Stored block ensures byte alignment.
-    // It has 3 bits (final, block_type) and then padding until byte boundary.
-    // After that everything is aligned to the boundary in the stored block.
-    // Empty stored block is Ob000 + (0-7) bits of padding + 0x00 0x00 0xFF 0xFF.
-    // Last 4 bytes are byte aligned.
-    if (flush_opt == .flush) {
-        try self.block_writer.storedBlock("", false);
-    }
-    if (flush_opt != .none) {
-        // Safe to call only when byte aligned or it is OK to add
-        // padding bits (on last byte of the final block).
-        try self.block_writer.flush();
-    }
-    // Reset internal tokens store.
-    self.tokens.reset();
-    // Notify win that tokens are flushed.
-    self.win.flush();
-}
-
-// Slide win and if needed lookup tables.
-fn slide(self: *Compress) void {
-    const n = self.win.slide();
-    self.lookup.slide(n);
-}
-
-/// Flushes internal buffers to the output writer. Outputs empty stored
-/// block to sync bit stream to the byte boundary, so that the
-/// decompressor can get all input data available so far.
-///
-/// It is useful mainly in compressed network protocols, to ensure that
-/// deflate bit stream can be used as byte stream. May degrade
-/// compression so it should be used only when necessary.
-///
-/// Completes the current deflate block and follows it with an empty
-/// stored block that is three zero bits plus filler bits to the next
-/// byte, followed by four bytes (00 00 ff ff).
-///
-pub fn flush(c: *Compress) !void {
-    try c.tokenize(.flush);
-}
-
-/// Completes deflate bit stream by writing any pending data as deflate
-/// final deflate block. HAS to be called once all data are written to
-/// the compressor as a signal that next block has to have final bit
-/// set.
-///
-pub fn finish(c: *Compress) !void {
-    _ = c;
-    @panic("TODO");
-}
-
-/// Use another writer while preserving history. Most probably flush
-/// should be called on old writer before setting new.
-pub fn setWriter(self: *Compress, new_writer: *Writer) void {
-    self.block_writer.setWriter(new_writer);
-    self.output = new_writer;
-}
-
 // Tokens store
 const Tokens = struct {
     list: [n_tokens]Token = undefined,
@@ -323,527 +171,110 @@ const Tokens = struct {
     }
 };
 
-/// Creates huffman only deflate blocks. Disables Lempel-Ziv match searching and
-/// only performs Huffman entropy encoding. Results in faster compression, much
-/// less memory requirements during compression but bigger compressed sizes.
-pub const Huffman = SimpleCompressor(.huffman, .raw);
-
-/// Creates store blocks only. Data are not compressed only packed into deflate
-/// store blocks. That adds 9 bytes of header for each block. Max stored block
-/// size is 64K. Block is emitted when flush is called on on finish.
-pub const store = struct {
-    pub fn Compressor(comptime container: Container, comptime WriterType: type) type {
-        return SimpleCompressor(.store, container, WriterType);
-    }
-
-    pub fn compressor(comptime container: Container, writer: anytype) !store.Compressor(container, @TypeOf(writer)) {
-        return try store.Compressor(container, @TypeOf(writer)).init(writer);
+fn drain(me: *Writer, data: []const []const u8, splat: usize) Writer.Error!usize {
+    _ = data;
+    _ = splat;
+    const c: *Compress = @fieldParentPtr("writer", me);
+    const out = c.block_writer.output;
+    switch (c.state) {
+        .header => {
+            c.state = .middle;
+            const header = c.hasher.container().header();
+            try out.writeAll(header);
+            return header.len;
+        },
+        .middle => {},
+        .ended => unreachable,
     }
-};
 
-const SimpleCompressorKind = enum {
-    huffman,
-    store,
-};
+    const buffered = me.buffered();
+    const min_lookahead = flate.match.min_length + flate.match.max_length;
+    const history_plus_lookahead_len = flate.history_len + min_lookahead;
+    if (buffered.len < history_plus_lookahead_len) return 0;
+    const lookahead = buffered[flate.history_len..];
 
-fn simpleCompressor(
-    comptime kind: SimpleCompressorKind,
-    comptime container: Container,
-    writer: anytype,
-) !SimpleCompressor(kind, container, @TypeOf(writer)) {
-    return try SimpleCompressor(kind, container, @TypeOf(writer)).init(writer);
+    _ = lookahead;
+    // TODO tokenize
+    //c.hasher.update(lookahead[0..n]);
+    @panic("TODO");
 }
 
-fn SimpleCompressor(
-    comptime kind: SimpleCompressorKind,
-    comptime container: Container,
-    comptime WriterType: type,
-) type {
-    const BlockWriterType = BlockWriter(WriterType);
-    return struct {
-        buffer: [65535]u8 = undefined, // because store blocks are limited to 65535 bytes
-        wp: usize = 0,
-
-        output: WriterType,
-        block_writer: BlockWriterType,
-        hasher: container.Hasher() = .{},
-
-        const Self = @This();
-
-        pub fn init(output: WriterType) !Self {
-            const self = Self{
-                .output = output,
-                .block_writer = BlockWriterType.init(output),
-            };
-            try container.writeHeader(self.output);
-            return self;
-        }
-
-        pub fn flush(self: *Self) !void {
-            try self.flushBuffer(false);
-            try self.block_writer.storedBlock("", false);
-            try self.block_writer.flush();
-        }
-
-        pub fn finish(self: *Self) !void {
-            try self.flushBuffer(true);
-            try self.block_writer.flush();
-            try container.writeFooter(&self.hasher, self.output);
-        }
-
-        fn flushBuffer(self: *Self, final: bool) !void {
-            const buf = self.buffer[0..self.wp];
-            switch (kind) {
-                .huffman => try self.block_writer.huffmanBlock(buf, final),
-                .store => try self.block_writer.storedBlock(buf, final),
-            }
-            self.wp = 0;
-        }
-    };
+pub fn end(c: *Compress) !void {
+    try endUnflushed(c);
+    try c.output.flush();
 }
 
-const LiteralNode = struct {
-    literal: u16,
-    freq: u16,
-};
-
-// Describes the state of the constructed tree for a given depth.
-const LevelInfo = struct {
-    // Our level.  for better printing
-    level: u32,
-
-    // The frequency of the last node at this level
-    last_freq: u32,
+pub fn endUnflushed(c: *Compress) !void {
+    while (c.writer.end != 0) _ = try drain(&c.writer, &.{""}, 1);
+    c.state = .ended;
 
-    // The frequency of the next character to add to this level
-    next_char_freq: u32,
+    const out = c.block_writer.output;
 
-    // The frequency of the next pair (from level below) to add to this level.
-    // Only valid if the "needed" value of the next lower level is 0.
-    next_pair_freq: u32,
-
-    // The number of chains remaining to generate for this level before moving
-    // up to the next level
-    needed: u32,
-};
+    // TODO flush tokens
 
-// hcode is a huffman code with a bit code and bit length.
-pub const HuffCode = struct {
-    code: u16 = 0,
-    len: u16 = 0,
-
-    // set sets the code and length of an hcode.
-    fn set(self: *HuffCode, code: u16, length: u16) void {
-        self.len = length;
-        self.code = code;
+    switch (c.hasher) {
+        .gzip => |*gzip| {
+            // GZIP 8 bytes footer
+            //  - 4 bytes, CRC32 (CRC-32)
+            //  - 4 bytes, ISIZE (Input SIZE) - size of the original (uncompressed) input data modulo 2^32
+            const footer = try out.writableArray(8);
+            std.mem.writeInt(u32, footer[0..4], gzip.crc.final(), .little);
+            std.mem.writeInt(u32, footer[4..8], @truncate(gzip.count), .little);
+        },
+        .zlib => |*zlib| {
+            // ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952).
+            // 4 bytes of ADLER32 (Adler-32 checksum)
+            // Checksum value of the uncompressed data (excluding any
+            // dictionary data) computed according to Adler-32
+            // algorithm.
+            std.mem.writeInt(u32, try out.writableArray(4), zlib.final, .big);
+        },
+        .raw => {},
     }
-};
-
-pub fn HuffmanEncoder(comptime size: usize) type {
-    return struct {
-        codes: [size]HuffCode = undefined,
-        // Reusable buffer with the longest possible frequency table.
-        freq_cache: [huffman.max_num_frequencies + 1]LiteralNode = undefined,
-        bit_count: [17]u32 = undefined,
-        lns: []LiteralNode = undefined, // sorted by literal, stored to avoid repeated allocation in generate
-        lfs: []LiteralNode = undefined, // sorted by frequency, stored to avoid repeated allocation in generate
-
-        const Self = @This();
-
-        // Update this Huffman Code object to be the minimum code for the specified frequency count.
-        //
-        // freq  An array of frequencies, in which frequency[i] gives the frequency of literal i.
-        // max_bits  The maximum number of bits to use for any literal.
-        pub fn generate(self: *Self, freq: []u16, max_bits: u32) void {
-            var list = self.freq_cache[0 .. freq.len + 1];
-            // Number of non-zero literals
-            var count: u32 = 0;
-            // Set list to be the set of all non-zero literals and their frequencies
-            for (freq, 0..) |f, i| {
-                if (f != 0) {
-                    list[count] = LiteralNode{ .literal = @as(u16, @intCast(i)), .freq = f };
-                    count += 1;
-                } else {
-                    list[count] = LiteralNode{ .literal = 0x00, .freq = 0 };
-                    self.codes[i].len = 0;
-                }
-            }
-            list[freq.len] = LiteralNode{ .literal = 0x00, .freq = 0 };
-
-            list = list[0..count];
-            if (count <= 2) {
-                // Handle the small cases here, because they are awkward for the general case code. With
-                // two or fewer literals, everything has bit length 1.
-                for (list, 0..) |node, i| {
-                    // "list" is in order of increasing literal value.
-                    self.codes[node.literal].set(@as(u16, @intCast(i)), 1);
-                }
-                return;
-            }
-            self.lfs = list;
-            mem.sort(LiteralNode, self.lfs, {}, byFreq);
-
-            // Get the number of literals for each bit count
-            const bit_count = self.bitCounts(list, max_bits);
-            // And do the assignment
-            self.assignEncodingAndSize(bit_count, list);
-        }
-
-        pub fn bitLength(self: *Self, freq: []u16) u32 {
-            var total: u32 = 0;
-            for (freq, 0..) |f, i| {
-                if (f != 0) {
-                    total += @as(u32, @intCast(f)) * @as(u32, @intCast(self.codes[i].len));
-                }
-            }
-            return total;
-        }
-
-        // Return the number of literals assigned to each bit size in the Huffman encoding
-        //
-        // This method is only called when list.len >= 3
-        // The cases of 0, 1, and 2 literals are handled by special case code.
-        //
-        // list: An array of the literals with non-zero frequencies
-        // and their associated frequencies. The array is in order of increasing
-        // frequency, and has as its last element a special element with frequency
-        // `math.maxInt(i32)`
-        //
-        // max_bits: The maximum number of bits that should be used to encode any literal.
-        // Must be less than 16.
-        //
-        // Returns an integer array in which array[i] indicates the number of literals
-        // that should be encoded in i bits.
-        fn bitCounts(self: *Self, list: []LiteralNode, max_bits_to_use: usize) []u32 {
-            var max_bits = max_bits_to_use;
-            const n = list.len;
-            const max_bits_limit = 16;
-
-            assert(max_bits < max_bits_limit);
-
-            // The tree can't have greater depth than n - 1, no matter what. This
-            // saves a little bit of work in some small cases
-            max_bits = @min(max_bits, n - 1);
-
-            // Create information about each of the levels.
-            // A bogus "Level 0" whose sole purpose is so that
-            // level1.prev.needed == 0.  This makes level1.next_pair_freq
-            // be a legitimate value that never gets chosen.
-            var levels: [max_bits_limit]LevelInfo = mem.zeroes([max_bits_limit]LevelInfo);
-            // leaf_counts[i] counts the number of literals at the left
-            // of ancestors of the rightmost node at level i.
-            // leaf_counts[i][j] is the number of literals at the left
-            // of the level j ancestor.
-            var leaf_counts: [max_bits_limit][max_bits_limit]u32 = mem.zeroes([max_bits_limit][max_bits_limit]u32);
-
-            {
-                var level = @as(u32, 1);
-                while (level <= max_bits) : (level += 1) {
-                    // For every level, the first two items are the first two characters.
-                    // We initialize the levels as if we had already figured this out.
-                    levels[level] = LevelInfo{
-                        .level = level,
-                        .last_freq = list[1].freq,
-                        .next_char_freq = list[2].freq,
-                        .next_pair_freq = list[0].freq + list[1].freq,
-                        .needed = 0,
-                    };
-                    leaf_counts[level][level] = 2;
-                    if (level == 1) {
-                        levels[level].next_pair_freq = math.maxInt(i32);
-                    }
-                }
-            }
-
-            // We need a total of 2*n - 2 items at top level and have already generated 2.
-            levels[max_bits].needed = 2 * @as(u32, @intCast(n)) - 4;
-
-            {
-                var level = max_bits;
-                while (true) {
-                    var l = &levels[level];
-                    if (l.next_pair_freq == math.maxInt(i32) and l.next_char_freq == math.maxInt(i32)) {
-                        // We've run out of both leaves and pairs.
-                        // End all calculations for this level.
-                        // To make sure we never come back to this level or any lower level,
-                        // set next_pair_freq impossibly large.
-                        l.needed = 0;
-                        levels[level + 1].next_pair_freq = math.maxInt(i32);
-                        level += 1;
-                        continue;
-                    }
-
-                    const prev_freq = l.last_freq;
-                    if (l.next_char_freq < l.next_pair_freq) {
-                        // The next item on this row is a leaf node.
-                        const next = leaf_counts[level][level] + 1;
-                        l.last_freq = l.next_char_freq;
-                        // Lower leaf_counts are the same of the previous node.
-                        leaf_counts[level][level] = next;
-                        if (next >= list.len) {
-                            l.next_char_freq = maxNode().freq;
-                        } else {
-                            l.next_char_freq = list[next].freq;
-                        }
-                    } else {
-                        // The next item on this row is a pair from the previous row.
-                        // next_pair_freq isn't valid until we generate two
-                        // more values in the level below
-                        l.last_freq = l.next_pair_freq;
-                        // Take leaf counts from the lower level, except counts[level] remains the same.
-                        @memcpy(leaf_counts[level][0..level], leaf_counts[level - 1][0..level]);
-                        levels[l.level - 1].needed = 2;
-                    }
-
-                    l.needed -= 1;
-                    if (l.needed == 0) {
-                        // We've done everything we need to do for this level.
-                        // Continue calculating one level up. Fill in next_pair_freq
-                        // of that level with the sum of the two nodes we've just calculated on
-                        // this level.
-                        if (l.level == max_bits) {
-                            // All done!
-                            break;
-                        }
-                        levels[l.level + 1].next_pair_freq = prev_freq + l.last_freq;
-                        level += 1;
-                    } else {
-                        // If we stole from below, move down temporarily to replenish it.
-                        while (levels[level - 1].needed > 0) {
-                            level -= 1;
-                            if (level == 0) {
-                                break;
-                            }
-                        }
-                    }
-                }
-            }
-
-            // Somethings is wrong if at the end, the top level is null or hasn't used
-            // all of the leaves.
-            assert(leaf_counts[max_bits][max_bits] == n);
-
-            var bit_count = self.bit_count[0 .. max_bits + 1];
-            var bits: u32 = 1;
-            const counts = &leaf_counts[max_bits];
-            {
-                var level = max_bits;
-                while (level > 0) : (level -= 1) {
-                    // counts[level] gives the number of literals requiring at least "bits"
-                    // bits to encode.
-                    bit_count[bits] = counts[level] - counts[level - 1];
-                    bits += 1;
-                    if (level == 0) {
-                        break;
-                    }
-                }
-            }
-            return bit_count;
-        }
-
-        // Look at the leaves and assign them a bit count and an encoding as specified
-        // in RFC 1951 3.2.2
-        fn assignEncodingAndSize(self: *Self, bit_count: []u32, list_arg: []LiteralNode) void {
-            var code = @as(u16, 0);
-            var list = list_arg;
-
-            for (bit_count, 0..) |bits, n| {
-                code <<= 1;
-                if (n == 0 or bits == 0) {
-                    continue;
-                }
-                // The literals list[list.len-bits] .. list[list.len-bits]
-                // are encoded using "bits" bits, and get the values
-                // code, code + 1, ....  The code values are
-                // assigned in literal order (not frequency order).
-                const chunk = list[list.len - @as(u32, @intCast(bits)) ..];
-
-                self.lns = chunk;
-                mem.sort(LiteralNode, self.lns, {}, byLiteral);
-
-                for (chunk) |node| {
-                    self.codes[node.literal] = HuffCode{
-                        .code = bitReverse(u16, code, @as(u5, @intCast(n))),
-                        .len = @as(u16, @intCast(n)),
-                    };
-                    code += 1;
-                }
-                list = list[0 .. list.len - @as(u32, @intCast(bits))];
-            }
-        }
-    };
 }
 
-fn maxNode() LiteralNode {
-    return LiteralNode{
-        .literal = math.maxInt(u16),
-        .freq = math.maxInt(u16),
-    };
-}
+pub const Simple = struct {
+    /// Note that store blocks are limited to 65535 bytes.
+    buffer: []u8,
+    wp: usize,
+    block_writer: BlockWriter,
+    hasher: Container.Hasher,
+    strategy: Strategy,
 
-pub fn huffmanEncoder(comptime size: u32) HuffmanEncoder(size) {
-    return .{};
-}
+    pub const Strategy = enum { huffman, store };
 
-pub const LiteralEncoder = HuffmanEncoder(huffman.max_num_frequencies);
-pub const DistanceEncoder = HuffmanEncoder(huffman.distance_code_count);
-pub const CodegenEncoder = HuffmanEncoder(19);
-
-// Generates a HuffmanCode corresponding to the fixed literal table
-pub fn fixedLiteralEncoder() LiteralEncoder {
-    var h: LiteralEncoder = undefined;
-    var ch: u16 = 0;
-
-    while (ch < huffman.max_num_frequencies) : (ch += 1) {
-        var bits: u16 = undefined;
-        var size: u16 = undefined;
-        switch (ch) {
-            0...143 => {
-                // size 8, 000110000  .. 10111111
-                bits = ch + 48;
-                size = 8;
-            },
-            144...255 => {
-                // size 9, 110010000 .. 111111111
-                bits = ch + 400 - 144;
-                size = 9;
-            },
-            256...279 => {
-                // size 7, 0000000 .. 0010111
-                bits = ch - 256;
-                size = 7;
-            },
-            else => {
-                // size 8, 11000000 .. 11000111
-                bits = ch + 192 - 280;
-                size = 8;
-            },
-        }
-        h.codes[ch] = HuffCode{ .code = bitReverse(u16, bits, @as(u5, @intCast(size))), .len = size };
+    pub fn init(out: *Writer, buffer: []u8, container: Container) !Simple {
+        const self: Simple = .{
+            .buffer = buffer,
+            .wp = 0,
+            .block_writer = .init(out),
+            .hasher = .init(container),
+        };
+        try container.writeHeader(self.out);
+        return self;
     }
-    return h;
-}
 
-pub fn fixedDistanceEncoder() DistanceEncoder {
-    var h: DistanceEncoder = undefined;
-    for (h.codes, 0..) |_, ch| {
-        h.codes[ch] = HuffCode{ .code = bitReverse(u16, @as(u16, @intCast(ch)), 5), .len = 5 };
+    pub fn flush(self: *Simple) !void {
+        try self.flushBuffer(false);
+        try self.block_writer.storedBlock("", false);
+        try self.block_writer.flush();
     }
-    return h;
-}
 
-pub fn huffmanDistanceEncoder() DistanceEncoder {
-    var distance_freq = [1]u16{0} ** huffman.distance_code_count;
-    distance_freq[0] = 1;
-    // huff_distance is a static distance encoder used for huffman only encoding.
-    // It can be reused since we will not be encoding distance values.
-    var h: DistanceEncoder = .{};
-    h.generate(distance_freq[0..], 15);
-    return h;
-}
-
-fn byLiteral(context: void, a: LiteralNode, b: LiteralNode) bool {
-    _ = context;
-    return a.literal < b.literal;
-}
-
-fn byFreq(context: void, a: LiteralNode, b: LiteralNode) bool {
-    _ = context;
-    if (a.freq == b.freq) {
-        return a.literal < b.literal;
+    pub fn finish(self: *Simple) !void {
+        try self.flushBuffer(true);
+        try self.block_writer.flush();
+        try self.hasher.container().writeFooter(&self.hasher, self.out);
     }
-    return a.freq < b.freq;
-}
 
-fn stream(r: *Reader, w: *Writer, limit: std.Io.Limit) Reader.StreamError!usize {
-    const c: *Compress = @fieldParentPtr("reader", r);
-    switch (c.state) {
-        .header => |i| {
-            const header = c.hasher.container().header();
-            const n = try w.write(header[i..]);
-            if (header.len - i - n == 0) {
-                c.state = .middle;
-            } else {
-                c.state.header += n;
-            }
-            return n;
-        },
-        .middle => {
-            c.input.fillMore() catch |err| switch (err) {
-                error.EndOfStream => {
-                    c.state = .final;
-                    return 0;
-                },
-                else => |e| return e,
-            };
-            const buffer_contents = c.input.buffered();
-            const min_lookahead = flate.match.min_length + flate.match.max_length;
-            const history_plus_lookahead_len = flate.history_len + min_lookahead;
-            if (buffer_contents.len < history_plus_lookahead_len) return 0;
-            const lookahead = buffer_contents[flate.history_len..];
-            const start = w.count;
-            const n = try c.tokenizeSlice(w, limit, lookahead) catch |err| switch (err) {
-                error.WriteFailed => return error.WriteFailed,
-            };
-            c.hasher.update(lookahead[0..n]);
-            c.input.toss(n);
-            return w.count - start;
-        },
-        .final => {
-            const buffer_contents = c.input.buffered();
-            const start = w.count;
-            const n = c.tokenizeSlice(w, limit, buffer_contents) catch |err| switch (err) {
-                error.WriteFailed => return error.WriteFailed,
-            };
-            if (buffer_contents.len - n == 0) {
-                c.hasher.update(buffer_contents);
-                c.input.tossAll();
-                {
-                    // In the case of flushing, last few lookahead buffers were
-                    // smaller than min match len, so only last literal can be
-                    // unwritten.
-                    assert(c.prev_match == null);
-                    try c.addPrevLiteral();
-                    c.prev_literal = null;
-
-                    try c.flushTokens(.final);
-                }
-                switch (c.hasher) {
-                    .gzip => |*gzip| {
-                        // GZIP 8 bytes footer
-                        //  - 4 bytes, CRC32 (CRC-32)
-                        //  - 4 bytes, ISIZE (Input SIZE) - size of the original (uncompressed) input data modulo 2^32
-                        comptime assert(c.footer_buffer.len == 8);
-                        std.mem.writeInt(u32, c.footer_buffer[0..4], gzip.final(), .little);
-                        std.mem.writeInt(u32, c.footer_buffer[4..8], gzip.bytes_read, .little);
-                        c.state = .{ .footer = 0 };
-                    },
-                    .zlib => |*zlib| {
-                        // ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952).
-                        // 4 bytes of ADLER32 (Adler-32 checksum)
-                        // Checksum value of the uncompressed data (excluding any
-                        // dictionary data) computed according to Adler-32
-                        // algorithm.
-                        comptime assert(c.footer_buffer.len == 8);
-                        std.mem.writeInt(u32, c.footer_buffer[4..8], zlib.final, .big);
-                        c.state = .{ .footer = 4 };
-                    },
-                    .raw => {
-                        c.state = .ended;
-                    },
-                }
-            }
-            return w.count - start;
-        },
-        .ended => return error.EndOfStream,
-        .footer => |i| {
-            const remaining = c.footer_buffer[i..];
-            const n = try w.write(limit.slice(remaining));
-            c.state = if (n == remaining) .ended else .{ .footer = i - n };
-            return n;
-        },
+    fn flushBuffer(self: *Simple, final: bool) !void {
+        const buf = self.buffer[0..self.wp];
+        switch (self.strategy) {
+            .huffman => try self.block_writer.huffmanBlock(buf, final),
+            .store => try self.block_writer.storedBlock(buf, final),
+        }
+        self.wp = 0;
     }
-}
+};
 
 test "generate a Huffman code from an array of frequencies" {
     var freqs: [19]u16 = [_]u16{
@@ -868,7 +299,8 @@ test "generate a Huffman code from an array of frequencies" {
         5, // 18
     };
 
-    var enc = huffmanEncoder(19);
+    var codes: [19]HuffmanEncoder.Code = undefined;
+    var enc: HuffmanEncoder = .{ .codes = &codes };
     enc.generate(freqs[0..], 7);
 
     try testing.expectEqual(@as(u32, 141), enc.bitLength(freqs[0..]));
@@ -906,120 +338,6 @@ test "generate a Huffman code from an array of frequencies" {
     try testing.expectEqual(@as(u16, 0x3f), enc.codes[16].code);
 }
 
-test "generate a Huffman code for the fixed literal table specific to Deflate" {
-    const enc = fixedLiteralEncoder();
-    for (enc.codes) |c| {
-        switch (c.len) {
-            7 => {
-                const v = @bitReverse(@as(u7, @intCast(c.code)));
-                try testing.expect(v <= 0b0010111);
-            },
-            8 => {
-                const v = @bitReverse(@as(u8, @intCast(c.code)));
-                try testing.expect((v >= 0b000110000 and v <= 0b10111111) or
-                    (v >= 0b11000000 and v <= 11000111));
-            },
-            9 => {
-                const v = @bitReverse(@as(u9, @intCast(c.code)));
-                try testing.expect(v >= 0b110010000 and v <= 0b111111111);
-            },
-            else => unreachable,
-        }
-    }
-}
-
-test "generate a Huffman code for the 30 possible relative distances (LZ77 distances) of Deflate" {
-    const enc = fixedDistanceEncoder();
-    for (enc.codes) |c| {
-        const v = @bitReverse(@as(u5, @intCast(c.code)));
-        try testing.expect(v <= 29);
-        try testing.expect(c.len == 5);
-    }
-}
-
-// Reverse bit-by-bit a N-bit code.
-fn bitReverse(comptime T: type, value: T, n: usize) T {
-    const r = @bitReverse(value);
-    return r >> @as(math.Log2Int(T), @intCast(@typeInfo(T).int.bits - n));
-}
-
-test bitReverse {
-    const ReverseBitsTest = struct {
-        in: u16,
-        bit_count: u5,
-        out: u16,
-    };
-
-    const reverse_bits_tests = [_]ReverseBitsTest{
-        .{ .in = 1, .bit_count = 1, .out = 1 },
-        .{ .in = 1, .bit_count = 2, .out = 2 },
-        .{ .in = 1, .bit_count = 3, .out = 4 },
-        .{ .in = 1, .bit_count = 4, .out = 8 },
-        .{ .in = 1, .bit_count = 5, .out = 16 },
-        .{ .in = 17, .bit_count = 5, .out = 17 },
-        .{ .in = 257, .bit_count = 9, .out = 257 },
-        .{ .in = 29, .bit_count = 5, .out = 23 },
-    };
-
-    for (reverse_bits_tests) |h| {
-        const v = bitReverse(u16, h.in, h.bit_count);
-        try std.testing.expectEqual(h.out, v);
-    }
-}
-
-test "fixedLiteralEncoder codes" {
-    var al = std.ArrayList(u8).init(testing.allocator);
-    defer al.deinit();
-    var bw = std.Io.bitWriter(.little, al.writer());
-
-    const f = fixedLiteralEncoder();
-    for (f.codes) |c| {
-        try bw.writeBits(c.code, c.len);
-    }
-    try testing.expectEqualSlices(u8, &fixed_codes, al.items);
-}
-
-pub const fixed_codes = [_]u8{
-    0b00001100, 0b10001100, 0b01001100, 0b11001100, 0b00101100, 0b10101100, 0b01101100, 0b11101100,
-    0b00011100, 0b10011100, 0b01011100, 0b11011100, 0b00111100, 0b10111100, 0b01111100, 0b11111100,
-    0b00000010, 0b10000010, 0b01000010, 0b11000010, 0b00100010, 0b10100010, 0b01100010, 0b11100010,
-    0b00010010, 0b10010010, 0b01010010, 0b11010010, 0b00110010, 0b10110010, 0b01110010, 0b11110010,
-    0b00001010, 0b10001010, 0b01001010, 0b11001010, 0b00101010, 0b10101010, 0b01101010, 0b11101010,
-    0b00011010, 0b10011010, 0b01011010, 0b11011010, 0b00111010, 0b10111010, 0b01111010, 0b11111010,
-    0b00000110, 0b10000110, 0b01000110, 0b11000110, 0b00100110, 0b10100110, 0b01100110, 0b11100110,
-    0b00010110, 0b10010110, 0b01010110, 0b11010110, 0b00110110, 0b10110110, 0b01110110, 0b11110110,
-    0b00001110, 0b10001110, 0b01001110, 0b11001110, 0b00101110, 0b10101110, 0b01101110, 0b11101110,
-    0b00011110, 0b10011110, 0b01011110, 0b11011110, 0b00111110, 0b10111110, 0b01111110, 0b11111110,
-    0b00000001, 0b10000001, 0b01000001, 0b11000001, 0b00100001, 0b10100001, 0b01100001, 0b11100001,
-    0b00010001, 0b10010001, 0b01010001, 0b11010001, 0b00110001, 0b10110001, 0b01110001, 0b11110001,
-    0b00001001, 0b10001001, 0b01001001, 0b11001001, 0b00101001, 0b10101001, 0b01101001, 0b11101001,
-    0b00011001, 0b10011001, 0b01011001, 0b11011001, 0b00111001, 0b10111001, 0b01111001, 0b11111001,
-    0b00000101, 0b10000101, 0b01000101, 0b11000101, 0b00100101, 0b10100101, 0b01100101, 0b11100101,
-    0b00010101, 0b10010101, 0b01010101, 0b11010101, 0b00110101, 0b10110101, 0b01110101, 0b11110101,
-    0b00001101, 0b10001101, 0b01001101, 0b11001101, 0b00101101, 0b10101101, 0b01101101, 0b11101101,
-    0b00011101, 0b10011101, 0b01011101, 0b11011101, 0b00111101, 0b10111101, 0b01111101, 0b11111101,
-    0b00010011, 0b00100110, 0b01001110, 0b10011010, 0b00111100, 0b01100101, 0b11101010, 0b10110100,
-    0b11101001, 0b00110011, 0b01100110, 0b11001110, 0b10011010, 0b00111101, 0b01100111, 0b11101110,
-    0b10111100, 0b11111001, 0b00001011, 0b00010110, 0b00101110, 0b01011010, 0b10111100, 0b01100100,
-    0b11101001, 0b10110010, 0b11100101, 0b00101011, 0b01010110, 0b10101110, 0b01011010, 0b10111101,
-    0b01100110, 0b11101101, 0b10111010, 0b11110101, 0b00011011, 0b00110110, 0b01101110, 0b11011010,
-    0b10111100, 0b01100101, 0b11101011, 0b10110110, 0b11101101, 0b00111011, 0b01110110, 0b11101110,
-    0b11011010, 0b10111101, 0b01100111, 0b11101111, 0b10111110, 0b11111101, 0b00000111, 0b00001110,
-    0b00011110, 0b00111010, 0b01111100, 0b11100100, 0b11101000, 0b10110001, 0b11100011, 0b00100111,
-    0b01001110, 0b10011110, 0b00111010, 0b01111101, 0b11100110, 0b11101100, 0b10111001, 0b11110011,
-    0b00010111, 0b00101110, 0b01011110, 0b10111010, 0b01111100, 0b11100101, 0b11101010, 0b10110101,
-    0b11101011, 0b00110111, 0b01101110, 0b11011110, 0b10111010, 0b01111101, 0b11100111, 0b11101110,
-    0b10111101, 0b11111011, 0b00001111, 0b00011110, 0b00111110, 0b01111010, 0b11111100, 0b11100100,
-    0b11101001, 0b10110011, 0b11100111, 0b00101111, 0b01011110, 0b10111110, 0b01111010, 0b11111101,
-    0b11100110, 0b11101101, 0b10111011, 0b11110111, 0b00011111, 0b00111110, 0b01111110, 0b11111010,
-    0b11111100, 0b11100101, 0b11101011, 0b10110111, 0b11101111, 0b00111111, 0b01111110, 0b11111110,
-    0b11111010, 0b11111101, 0b11100111, 0b11101111, 0b10111111, 0b11111111, 0b00000000, 0b00100000,
-    0b00001000, 0b00001100, 0b10000001, 0b11000010, 0b11100000, 0b00001000, 0b00100100, 0b00001010,
-    0b10001101, 0b11000001, 0b11100010, 0b11110000, 0b00000100, 0b00100010, 0b10001001, 0b01001100,
-    0b10100001, 0b11010010, 0b11101000, 0b00000011, 0b10000011, 0b01000011, 0b11000011, 0b00100011,
-    0b10100011,
-};
-
 test "tokenization" {
     const L = Token.initLiteral;
     const M = Token.initMatch;
@@ -1133,7 +451,7 @@ test "file tokenization" {
         const data = case.data;
 
         for (levels, 0..) |level, i| { // for each compression level
-            var original: Reader = .fixed(data);
+            var original: std.Io.Reader = .fixed(data);
 
             // buffer for decompressed data
             var al = std.ArrayList(u8).init(testing.allocator);
@@ -1198,32 +516,33 @@ const TokenDecoder = struct {
 };
 
 test "store simple compressor" {
-    const data = "Hello world!";
-    const expected = [_]u8{
-        0x1, // block type 0, final bit set
-        0xc, 0x0, // len = 12
-        0xf3, 0xff, // ~len
-        'H', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd', '!', //
-        //0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x21,
-    };
-
-    var fbs: Reader = .fixed(data);
-    var al = std.ArrayList(u8).init(testing.allocator);
-    defer al.deinit();
-
-    var cmp = try store.compressor(.raw, al.writer());
-    try cmp.compress(&fbs);
-    try cmp.finish();
-    try testing.expectEqualSlices(u8, &expected, al.items);
-
-    fbs = .fixed(data);
-    try al.resize(0);
-
-    // huffman only compresoor will also emit store block for this small sample
-    var hc = try huffman.compressor(.raw, al.writer());
-    try hc.compress(&fbs);
-    try hc.finish();
-    try testing.expectEqualSlices(u8, &expected, al.items);
+    if (true) return error.SkipZigTest;
+    //const data = "Hello world!";
+    //const expected = [_]u8{
+    //    0x1, // block type 0, final bit set
+    //    0xc, 0x0, // len = 12
+    //    0xf3, 0xff, // ~len
+    //    'H', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd', '!', //
+    //    //0x48, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, 0x64, 0x21,
+    //};
+
+    //var fbs: std.Io.Reader = .fixed(data);
+    //var al = std.ArrayList(u8).init(testing.allocator);
+    //defer al.deinit();
+
+    //var cmp = try store.compressor(.raw, al.writer());
+    //try cmp.compress(&fbs);
+    //try cmp.finish();
+    //try testing.expectEqualSlices(u8, &expected, al.items);
+
+    //fbs = .fixed(data);
+    //try al.resize(0);
+
+    //// huffman only compressor will also emit store block for this small sample
+    //var hc = try huffman.compressor(.raw, al.writer());
+    //try hc.compress(&fbs);
+    //try hc.finish();
+    //try testing.expectEqualSlices(u8, &expected, al.items);
 }
 
 test "sliding window match" {
lib/std/compress/flate/Decompress.zig
@@ -620,10 +620,9 @@ test "init/find" {
 }
 
 test "encode/decode literals" {
-    const LiteralEncoder = std.compress.flate.Compress.LiteralEncoder;
-
+    var codes: [flate.HuffmanEncoder.max_num_frequencies]flate.HuffmanEncoder.Code = undefined;
     for (1..286) |j| { // for all different number of codes
-        var enc: LiteralEncoder = .{};
+        var enc: flate.HuffmanEncoder = .{ .codes = &codes };
         // create frequencies
         var freq = [_]u16{0} ** 286;
         freq[256] = 1; // ensure we have end of block code
lib/std/compress/flate/HuffmanEncoder.zig
@@ -0,0 +1,475 @@
+const HuffmanEncoder = @This();
+const std = @import("std");
+const assert = std.debug.assert;
+const testing = std.testing;
+
+codes: []Code,
+// Reusable buffer with the longest possible frequency table.
+freq_cache: [max_num_frequencies + 1]LiteralNode,
+bit_count: [17]u32,
+lns: []LiteralNode, // sorted by literal, stored to avoid repeated allocation in generate
+lfs: []LiteralNode, // sorted by frequency, stored to avoid repeated allocation in generate
+
+pub const LiteralNode = struct {
+    literal: u16,
+    freq: u16,
+
+    pub fn max() LiteralNode {
+        return .{
+            .literal = std.math.maxInt(u16),
+            .freq = std.math.maxInt(u16),
+        };
+    }
+};
+
+pub const Code = struct {
+    code: u16 = 0,
+    len: u16 = 0,
+};
+
+/// The odd order in which the codegen code sizes are written.
+pub const codegen_order = [_]u32{ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };
+/// The number of codegen codes.
+pub const codegen_code_count = 19;
+
+/// The largest distance code.
+pub const distance_code_count = 30;
+
+/// Maximum number of literals.
+pub const max_num_lit = 286;
+
+/// Max number of frequencies used for a Huffman Code
+/// Possible lengths are codegen_code_count (19), distance_code_count (30) and max_num_lit (286).
+/// The largest of these is max_num_lit.
+pub const max_num_frequencies = max_num_lit;
+
+/// Biggest block size for uncompressed block.
+pub const max_store_block_size = 65535;
+/// The special code used to mark the end of a block.
+pub const end_block_marker = 256;
+
+/// Update this Huffman Code object to be the minimum code for the specified frequency count.
+///
+/// freq  An array of frequencies, in which frequency[i] gives the frequency of literal i.
+/// max_bits  The maximum number of bits to use for any literal.
+pub fn generate(self: *HuffmanEncoder, freq: []u16, max_bits: u32) void {
+    var list = self.freq_cache[0 .. freq.len + 1];
+    // Number of non-zero literals
+    var count: u32 = 0;
+    // Set list to be the set of all non-zero literals and their frequencies
+    for (freq, 0..) |f, i| {
+        if (f != 0) {
+            list[count] = LiteralNode{ .literal = @as(u16, @intCast(i)), .freq = f };
+            count += 1;
+        } else {
+            list[count] = LiteralNode{ .literal = 0x00, .freq = 0 };
+            self.codes[i].len = 0;
+        }
+    }
+    list[freq.len] = LiteralNode{ .literal = 0x00, .freq = 0 };
+
+    list = list[0..count];
+    if (count <= 2) {
+        // Handle the small cases here, because they are awkward for the general case code. With
+        // two or fewer literals, everything has bit length 1.
+        for (list, 0..) |node, i| {
+            // "list" is in order of increasing literal value.
+            self.codes[node.literal] = .{
+                .code = @intCast(i),
+                .len = 1,
+            };
+        }
+        return;
+    }
+    self.lfs = list;
+    std.mem.sort(LiteralNode, self.lfs, {}, byFreq);
+
+    // Get the number of literals for each bit count
+    const bit_count = self.bitCounts(list, max_bits);
+    // And do the assignment
+    self.assignEncodingAndSize(bit_count, list);
+}
+
+pub fn bitLength(self: *HuffmanEncoder, freq: []u16) u32 {
+    var total: u32 = 0;
+    for (freq, 0..) |f, i| {
+        if (f != 0) {
+            total += @as(u32, @intCast(f)) * @as(u32, @intCast(self.codes[i].len));
+        }
+    }
+    return total;
+}
+
+/// Return the number of literals assigned to each bit size in the Huffman encoding
+///
+/// This method is only called when list.len >= 3
+/// The cases of 0, 1, and 2 literals are handled by special case code.
+///
+/// list: An array of the literals with non-zero frequencies
+/// and their associated frequencies. The array is in order of increasing
+/// frequency, and has as its last element a special element with frequency
+/// `math.maxInt(i32)`
+///
+/// max_bits: The maximum number of bits that should be used to encode any literal.
+/// Must be less than 16.
+///
+/// Returns an integer array in which array[i] indicates the number of literals
+/// that should be encoded in i bits.
+fn bitCounts(self: *HuffmanEncoder, list: []LiteralNode, max_bits_to_use: usize) []u32 {
+    var max_bits = max_bits_to_use;
+    const n = list.len;
+    const max_bits_limit = 16;
+
+    assert(max_bits < max_bits_limit);
+
+    // The tree can't have greater depth than n - 1, no matter what. This
+    // saves a little bit of work in some small cases
+    max_bits = @min(max_bits, n - 1);
+
+    // Create information about each of the levels.
+    // A bogus "Level 0" whose sole purpose is so that
+    // level1.prev.needed == 0.  This makes level1.next_pair_freq
+    // be a legitimate value that never gets chosen.
+    var levels: [max_bits_limit]LevelInfo = std.mem.zeroes([max_bits_limit]LevelInfo);
+    // leaf_counts[i] counts the number of literals at the left
+    // of ancestors of the rightmost node at level i.
+    // leaf_counts[i][j] is the number of literals at the left
+    // of the level j ancestor.
+    var leaf_counts: [max_bits_limit][max_bits_limit]u32 = @splat(0);
+
+    {
+        var level = @as(u32, 1);
+        while (level <= max_bits) : (level += 1) {
+            // For every level, the first two items are the first two characters.
+            // We initialize the levels as if we had already figured this out.
+            levels[level] = LevelInfo{
+                .level = level,
+                .last_freq = list[1].freq,
+                .next_char_freq = list[2].freq,
+                .next_pair_freq = list[0].freq + list[1].freq,
+                .needed = 0,
+            };
+            leaf_counts[level][level] = 2;
+            if (level == 1) {
+                levels[level].next_pair_freq = std.math.maxInt(i32);
+            }
+        }
+    }
+
+    // We need a total of 2*n - 2 items at top level and have already generated 2.
+    levels[max_bits].needed = 2 * @as(u32, @intCast(n)) - 4;
+
+    {
+        var level = max_bits;
+        while (true) {
+            var l = &levels[level];
+            if (l.next_pair_freq == std.math.maxInt(i32) and l.next_char_freq == std.math.maxInt(i32)) {
+                // We've run out of both leaves and pairs.
+                // End all calculations for this level.
+                // To make sure we never come back to this level or any lower level,
+                // set next_pair_freq impossibly large.
+                l.needed = 0;
+                levels[level + 1].next_pair_freq = std.math.maxInt(i32);
+                level += 1;
+                continue;
+            }
+
+            const prev_freq = l.last_freq;
+            if (l.next_char_freq < l.next_pair_freq) {
+                // The next item on this row is a leaf node.
+                const next = leaf_counts[level][level] + 1;
+                l.last_freq = l.next_char_freq;
+                // Lower leaf_counts are the same of the previous node.
+                leaf_counts[level][level] = next;
+                if (next >= list.len) {
+                    l.next_char_freq = LiteralNode.max().freq;
+                } else {
+                    l.next_char_freq = list[next].freq;
+                }
+            } else {
+                // The next item on this row is a pair from the previous row.
+                // next_pair_freq isn't valid until we generate two
+                // more values in the level below
+                l.last_freq = l.next_pair_freq;
+                // Take leaf counts from the lower level, except counts[level] remains the same.
+                @memcpy(leaf_counts[level][0..level], leaf_counts[level - 1][0..level]);
+                levels[l.level - 1].needed = 2;
+            }
+
+            l.needed -= 1;
+            if (l.needed == 0) {
+                // We've done everything we need to do for this level.
+                // Continue calculating one level up. Fill in next_pair_freq
+                // of that level with the sum of the two nodes we've just calculated on
+                // this level.
+                if (l.level == max_bits) {
+                    // All done!
+                    break;
+                }
+                levels[l.level + 1].next_pair_freq = prev_freq + l.last_freq;
+                level += 1;
+            } else {
+                // If we stole from below, move down temporarily to replenish it.
+                while (levels[level - 1].needed > 0) {
+                    level -= 1;
+                    if (level == 0) {
+                        break;
+                    }
+                }
+            }
+        }
+    }
+
+    // Something is wrong if at the end, the top level is null or hasn't used
+    // all of the leaves.
+    assert(leaf_counts[max_bits][max_bits] == n);
+
+    var bit_count = self.bit_count[0 .. max_bits + 1];
+    var bits: u32 = 1;
+    const counts = &leaf_counts[max_bits];
+    {
+        var level = max_bits;
+        while (level > 0) : (level -= 1) {
+            // counts[level] gives the number of literals requiring at least "bits"
+            // bits to encode.
+            bit_count[bits] = counts[level] - counts[level - 1];
+            bits += 1;
+            if (level == 0) {
+                break;
+            }
+        }
+    }
+    return bit_count;
+}
+
+/// Look at the leaves and assign them a bit count and an encoding as specified
+/// in RFC 1951 3.2.2
+fn assignEncodingAndSize(self: *HuffmanEncoder, bit_count: []u32, list_arg: []LiteralNode) void {
+    var code = @as(u16, 0);
+    var list = list_arg;
+
+    for (bit_count, 0..) |bits, n| {
+        code <<= 1;
+        if (n == 0 or bits == 0) {
+            continue;
+        }
+        // The literals list[list.len-bits] .. list[list.len-1]
+        // are encoded using "bits" bits, and get the values
+        // code, code + 1, ....  The code values are
+        // assigned in literal order (not frequency order).
+        const chunk = list[list.len - @as(u32, @intCast(bits)) ..];
+
+        self.lns = chunk;
+        std.mem.sort(LiteralNode, self.lns, {}, byLiteral);
+
+        for (chunk) |node| {
+            self.codes[node.literal] = .{
+                .code = bitReverse(u16, code, @as(u5, @intCast(n))),
+                .len = @as(u16, @intCast(n)),
+            };
+            code += 1;
+        }
+        list = list[0 .. list.len - @as(u32, @intCast(bits))];
+    }
+}
+
+fn byFreq(context: void, a: LiteralNode, b: LiteralNode) bool {
+    _ = context;
+    if (a.freq == b.freq) {
+        return a.literal < b.literal;
+    }
+    return a.freq < b.freq;
+}
+
+/// Describes the state of the constructed tree for a given depth.
+const LevelInfo = struct {
+    /// Our level.  for better printing
+    level: u32,
+    /// The frequency of the last node at this level
+    last_freq: u32,
+    /// The frequency of the next character to add to this level
+    next_char_freq: u32,
+    /// The frequency of the next pair (from level below) to add to this level.
+    /// Only valid if the "needed" value of the next lower level is 0.
+    next_pair_freq: u32,
+    /// The number of chains remaining to generate for this level before moving
+    /// up to the next level
+    needed: u32,
+};
+
+fn byLiteral(context: void, a: LiteralNode, b: LiteralNode) bool {
+    _ = context;
+    return a.literal < b.literal;
+}
+
+/// Reverse bit-by-bit a N-bit code.
+fn bitReverse(comptime T: type, value: T, n: usize) T {
+    const r = @bitReverse(value);
+    return r >> @as(std.math.Log2Int(T), @intCast(@typeInfo(T).int.bits - n));
+}
+
+test bitReverse {
+    const ReverseBitsTest = struct {
+        in: u16,
+        bit_count: u5,
+        out: u16,
+    };
+
+    const reverse_bits_tests = [_]ReverseBitsTest{
+        .{ .in = 1, .bit_count = 1, .out = 1 },
+        .{ .in = 1, .bit_count = 2, .out = 2 },
+        .{ .in = 1, .bit_count = 3, .out = 4 },
+        .{ .in = 1, .bit_count = 4, .out = 8 },
+        .{ .in = 1, .bit_count = 5, .out = 16 },
+        .{ .in = 17, .bit_count = 5, .out = 17 },
+        .{ .in = 257, .bit_count = 9, .out = 257 },
+        .{ .in = 29, .bit_count = 5, .out = 23 },
+    };
+
+    for (reverse_bits_tests) |h| {
+        const v = bitReverse(u16, h.in, h.bit_count);
+        try std.testing.expectEqual(h.out, v);
+    }
+}
+
+/// Generates a HuffmanCode corresponding to the fixed literal table
+pub fn fixedLiteralEncoder(codes: *[max_num_frequencies]Code) HuffmanEncoder {
+    var h: HuffmanEncoder = undefined;
+    h.codes = codes;
+    var ch: u16 = 0;
+
+    while (ch < max_num_frequencies) : (ch += 1) {
+        var bits: u16 = undefined;
+        var size: u16 = undefined;
+        switch (ch) {
+            0...143 => {
+                // size 8, 00110000 .. 10111111
+                bits = ch + 48;
+                size = 8;
+            },
+            144...255 => {
+                // size 9, 110010000 .. 111111111
+                bits = ch + 400 - 144;
+                size = 9;
+            },
+            256...279 => {
+                // size 7, 0000000 .. 0010111
+                bits = ch - 256;
+                size = 7;
+            },
+            else => {
+                // size 8, 11000000 .. 11000111
+                bits = ch + 192 - 280;
+                size = 8;
+            },
+        }
+        h.codes[ch] = .{ .code = bitReverse(u16, bits, @as(u5, @intCast(size))), .len = size };
+    }
+    return h;
+}
+
+pub fn fixedDistanceEncoder(codes: *[distance_code_count]Code) HuffmanEncoder {
+    var h: HuffmanEncoder = undefined;
+    h.codes = codes;
+    for (h.codes, 0..) |_, ch| {
+        h.codes[ch] = .{ .code = bitReverse(u16, @as(u16, @intCast(ch)), 5), .len = 5 };
+    }
+    return h;
+}
+
+pub fn huffmanDistanceEncoder(codes: *[distance_code_count]Code) HuffmanEncoder {
+    var distance_freq: [distance_code_count]u16 = @splat(0);
+    distance_freq[0] = 1;
+    // huff_distance is a static distance encoder used for huffman only encoding.
+    // It can be reused since we will not be encoding distance values.
+    var h: HuffmanEncoder = undefined;
+    h.codes = codes;
+    h.generate(distance_freq[0..], 15);
+    return h;
+}
+
+test "generate a Huffman code for the fixed literal table specific to Deflate" {
+    var codes: [max_num_frequencies]Code = undefined;
+    const enc = fixedLiteralEncoder(&codes);
+    for (enc.codes) |c| {
+        switch (c.len) {
+            7 => {
+                const v = @bitReverse(@as(u7, @intCast(c.code)));
+                try testing.expect(v <= 0b0010111);
+            },
+            8 => {
+                const v = @bitReverse(@as(u8, @intCast(c.code)));
+                try testing.expect((v >= 0b00110000 and v <= 0b10111111) or
+                    (v >= 0b11000000 and v <= 0b11000111));
+            },
+            9 => {
+                const v = @bitReverse(@as(u9, @intCast(c.code)));
+                try testing.expect(v >= 0b110010000 and v <= 0b111111111);
+            },
+            else => unreachable,
+        }
+    }
+}
+
+test "generate a Huffman code for the 30 possible relative distances (LZ77 distances) of Deflate" {
+    var codes: [distance_code_count]Code = undefined;
+    const enc = fixedDistanceEncoder(&codes);
+    for (enc.codes) |c| {
+        const v = @bitReverse(@as(u5, @intCast(c.code)));
+        try testing.expect(v <= 29);
+        try testing.expect(c.len == 5);
+    }
+}
+
+test "fixedLiteralEncoder codes" {
+    var al = std.ArrayList(u8).init(testing.allocator);
+    defer al.deinit();
+    var bw = std.Io.bitWriter(.little, al.writer());
+
+    var codes: [max_num_frequencies]Code = undefined;
+    const f = fixedLiteralEncoder(&codes);
+    for (f.codes) |c| {
+        try bw.writeBits(c.code, c.len);
+    }
+    try testing.expectEqualSlices(u8, &fixed_codes, al.items);
+}
+
+pub const fixed_codes = [_]u8{
+    0b00001100, 0b10001100, 0b01001100, 0b11001100, 0b00101100, 0b10101100, 0b01101100, 0b11101100,
+    0b00011100, 0b10011100, 0b01011100, 0b11011100, 0b00111100, 0b10111100, 0b01111100, 0b11111100,
+    0b00000010, 0b10000010, 0b01000010, 0b11000010, 0b00100010, 0b10100010, 0b01100010, 0b11100010,
+    0b00010010, 0b10010010, 0b01010010, 0b11010010, 0b00110010, 0b10110010, 0b01110010, 0b11110010,
+    0b00001010, 0b10001010, 0b01001010, 0b11001010, 0b00101010, 0b10101010, 0b01101010, 0b11101010,
+    0b00011010, 0b10011010, 0b01011010, 0b11011010, 0b00111010, 0b10111010, 0b01111010, 0b11111010,
+    0b00000110, 0b10000110, 0b01000110, 0b11000110, 0b00100110, 0b10100110, 0b01100110, 0b11100110,
+    0b00010110, 0b10010110, 0b01010110, 0b11010110, 0b00110110, 0b10110110, 0b01110110, 0b11110110,
+    0b00001110, 0b10001110, 0b01001110, 0b11001110, 0b00101110, 0b10101110, 0b01101110, 0b11101110,
+    0b00011110, 0b10011110, 0b01011110, 0b11011110, 0b00111110, 0b10111110, 0b01111110, 0b11111110,
+    0b00000001, 0b10000001, 0b01000001, 0b11000001, 0b00100001, 0b10100001, 0b01100001, 0b11100001,
+    0b00010001, 0b10010001, 0b01010001, 0b11010001, 0b00110001, 0b10110001, 0b01110001, 0b11110001,
+    0b00001001, 0b10001001, 0b01001001, 0b11001001, 0b00101001, 0b10101001, 0b01101001, 0b11101001,
+    0b00011001, 0b10011001, 0b01011001, 0b11011001, 0b00111001, 0b10111001, 0b01111001, 0b11111001,
+    0b00000101, 0b10000101, 0b01000101, 0b11000101, 0b00100101, 0b10100101, 0b01100101, 0b11100101,
+    0b00010101, 0b10010101, 0b01010101, 0b11010101, 0b00110101, 0b10110101, 0b01110101, 0b11110101,
+    0b00001101, 0b10001101, 0b01001101, 0b11001101, 0b00101101, 0b10101101, 0b01101101, 0b11101101,
+    0b00011101, 0b10011101, 0b01011101, 0b11011101, 0b00111101, 0b10111101, 0b01111101, 0b11111101,
+    0b00010011, 0b00100110, 0b01001110, 0b10011010, 0b00111100, 0b01100101, 0b11101010, 0b10110100,
+    0b11101001, 0b00110011, 0b01100110, 0b11001110, 0b10011010, 0b00111101, 0b01100111, 0b11101110,
+    0b10111100, 0b11111001, 0b00001011, 0b00010110, 0b00101110, 0b01011010, 0b10111100, 0b01100100,
+    0b11101001, 0b10110010, 0b11100101, 0b00101011, 0b01010110, 0b10101110, 0b01011010, 0b10111101,
+    0b01100110, 0b11101101, 0b10111010, 0b11110101, 0b00011011, 0b00110110, 0b01101110, 0b11011010,
+    0b10111100, 0b01100101, 0b11101011, 0b10110110, 0b11101101, 0b00111011, 0b01110110, 0b11101110,
+    0b11011010, 0b10111101, 0b01100111, 0b11101111, 0b10111110, 0b11111101, 0b00000111, 0b00001110,
+    0b00011110, 0b00111010, 0b01111100, 0b11100100, 0b11101000, 0b10110001, 0b11100011, 0b00100111,
+    0b01001110, 0b10011110, 0b00111010, 0b01111101, 0b11100110, 0b11101100, 0b10111001, 0b11110011,
+    0b00010111, 0b00101110, 0b01011110, 0b10111010, 0b01111100, 0b11100101, 0b11101010, 0b10110101,
+    0b11101011, 0b00110111, 0b01101110, 0b11011110, 0b10111010, 0b01111101, 0b11100111, 0b11101110,
+    0b10111101, 0b11111011, 0b00001111, 0b00011110, 0b00111110, 0b01111010, 0b11111100, 0b11100100,
+    0b11101001, 0b10110011, 0b11100111, 0b00101111, 0b01011110, 0b10111110, 0b01111010, 0b11111101,
+    0b11100110, 0b11101101, 0b10111011, 0b11110111, 0b00011111, 0b00111110, 0b01111110, 0b11111010,
+    0b11111100, 0b11100101, 0b11101011, 0b10110111, 0b11101111, 0b00111111, 0b01111110, 0b11111110,
+    0b11111010, 0b11111101, 0b11100111, 0b11101111, 0b10111111, 0b11111111, 0b00000000, 0b00100000,
+    0b00001000, 0b00001100, 0b10000001, 0b11000010, 0b11100000, 0b00001000, 0b00100100, 0b00001010,
+    0b10001101, 0b11000001, 0b11100010, 0b11110000, 0b00000100, 0b00100010, 0b10001001, 0b01001100,
+    0b10100001, 0b11010010, 0b11101000, 0b00000011, 0b10000011, 0b01000011, 0b11000011, 0b00100011,
+    0b10100011,
+};
lib/std/compress/flate.zig
@@ -1,7 +1,7 @@
 const builtin = @import("builtin");
 const std = @import("../std.zig");
 const testing = std.testing;
-const Writer = std.io.Writer;
+const Writer = std.Io.Writer;
 
 /// Container of the deflate bit stream body. Container adds header before
 /// deflate bit stream and footer after. It can bi gzip, zlib or raw (no header,
@@ -77,7 +77,7 @@ pub const Container = enum {
         raw: void,
         gzip: struct {
             crc: std.hash.Crc32 = .init(),
-            count: usize = 0,
+            count: u32 = 0,
         },
         zlib: std.hash.Adler32,
 
@@ -98,7 +98,7 @@ pub const Container = enum {
                 .raw => {},
                 .gzip => |*gzip| {
                     gzip.update(buf);
-                    gzip.count += buf.len;
+                    gzip.count +%= buf.len;
                 },
                 .zlib => |*zlib| {
                     zlib.update(buf);
@@ -148,35 +148,9 @@ pub const Compress = @import("flate/Compress.zig");
 /// decompression and correctly produces the original full-size data or file.
 pub const Decompress = @import("flate/Decompress.zig");
 
-/// Huffman only compression. Without Lempel-Ziv match searching. Faster
-/// compression, less memory requirements but bigger compressed sizes.
-pub const huffman = struct {
-    // The odd order in which the codegen code sizes are written.
-    pub const codegen_order = [_]u32{ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };
-    // The number of codegen codes.
-    pub const codegen_code_count = 19;
-
-    // The largest distance code.
-    pub const distance_code_count = 30;
-
-    // Maximum number of literals.
-    pub const max_num_lit = 286;
-
-    // Max number of frequencies used for a Huffman Code
-    // Possible lengths are codegen_code_count (19), distance_code_count (30) and max_num_lit (286).
-    // The largest of these is max_num_lit.
-    pub const max_num_frequencies = max_num_lit;
-
-    // Biggest block size for uncompressed block.
-    pub const max_store_block_size = 65535;
-    // The special code used to mark the end of a block.
-    pub const end_block_marker = 256;
-};
-
-test {
-    _ = Compress;
-    _ = Decompress;
-}
+/// Compression without Lempel-Ziv match searching. Faster compression, less
+/// memory requirements but bigger compressed sizes.
+pub const HuffmanEncoder = @import("flate/HuffmanEncoder.zig");
 
 test "compress/decompress" {
     const print = std.debug.print;
@@ -217,12 +191,11 @@ test "compress/decompress" {
         },
     };
 
-    for (cases, 0..) |case, case_no| { // for each case
+    for (cases, 0..) |case, case_no| {
         const data = case.data;
 
-        for (levels, 0..) |level, i| { // for each compression level
-
-            inline for (Container.list) |container| { // for each wrapping
+        for (levels, 0..) |level, i| {
+            for (Container.list) |container| {
                 var compressed_size: usize = if (case.gzip_sizes[i] > 0)
                     case.gzip_sizes[i] - Container.gzip.size() + container.size()
                 else
@@ -230,21 +203,21 @@ test "compress/decompress" {
 
                 // compress original stream to compressed stream
                 {
-                    var original: std.io.Reader = .fixed(data);
                     var compressed: Writer = .fixed(&cmp_buf);
-                    var compress: Compress = .init(&original, &.{}, .{ .container = .raw, .level = level });
-                    const n = try compress.reader.streamRemaining(&compressed);
+                    var compress: Compress = .init(&compressed, &.{}, .{ .container = .raw, .level = level });
+                    try compress.writer.writeAll(data);
+                    try compress.end();
+
                     if (compressed_size == 0) {
                         if (container == .gzip)
                             print("case {d} gzip level {} compressed size: {d}\n", .{ case_no, level, compressed.pos });
                         compressed_size = compressed.end;
                     }
-                    try testing.expectEqual(compressed_size, n);
                     try testing.expectEqual(compressed_size, compressed.end);
                 }
                 // decompress compressed stream to decompressed stream
                 {
-                    var compressed: std.io.Reader = .fixed(cmp_buf[0..compressed_size]);
+                    var compressed: std.Io.Reader = .fixed(cmp_buf[0..compressed_size]);
                     var decompressed: Writer = .fixed(&dcm_buf);
                     var decompress: Decompress = .init(&compressed, container, &.{});
                     _ = try decompress.reader.streamRemaining(&decompressed);
@@ -266,7 +239,7 @@ test "compress/decompress" {
                 }
                 // decompressor reader interface
                 {
-                    var compressed: std.io.Reader = .fixed(cmp_buf[0..compressed_size]);
+                    var compressed: std.Io.Reader = .fixed(cmp_buf[0..compressed_size]);
                     var decompress: Decompress = .init(&compressed, container, &.{});
                     const n = try decompress.reader.readSliceShort(&dcm_buf);
                     try testing.expectEqual(data.len, n);
@@ -276,7 +249,7 @@ test "compress/decompress" {
         }
         // huffman only compression
         {
-            inline for (Container.list) |container| { // for each wrapping
+            for (Container.list) |container| {
                 var compressed_size: usize = if (case.huffman_only_size > 0)
                     case.huffman_only_size - Container.gzip.size() + container.size()
                 else
@@ -284,7 +257,7 @@ test "compress/decompress" {
 
                 // compress original stream to compressed stream
                 {
-                    var original: std.io.Reader = .fixed(data);
+                    var original: std.Io.Reader = .fixed(data);
                     var compressed: Writer = .fixed(&cmp_buf);
                     var cmp = try Compress.Huffman.init(container, &compressed);
                     try cmp.compress(original.reader());
@@ -298,7 +271,7 @@ test "compress/decompress" {
                 }
                 // decompress compressed stream to decompressed stream
                 {
-                    var compressed: std.io.Reader = .fixed(cmp_buf[0..compressed_size]);
+                    var compressed: std.Io.Reader = .fixed(cmp_buf[0..compressed_size]);
                     var decompress: Decompress = .init(&compressed, container, &.{});
                     var decompressed: Writer = .fixed(&dcm_buf);
                     _ = try decompress.reader.streamRemaining(&decompressed);
@@ -309,7 +282,7 @@ test "compress/decompress" {
 
         // store only
         {
-            inline for (Container.list) |container| { // for each wrapping
+            for (Container.list) |container| {
                 var compressed_size: usize = if (case.store_size > 0)
                     case.store_size - Container.gzip.size() + container.size()
                 else
@@ -317,7 +290,7 @@ test "compress/decompress" {
 
                 // compress original stream to compressed stream
                 {
-                    var original: std.io.Reader = .fixed(data);
+                    var original: std.Io.Reader = .fixed(data);
                     var compressed: Writer = .fixed(&cmp_buf);
                     var cmp = try Compress.SimpleCompressor(.store, container).init(&compressed);
                     try cmp.compress(original.reader());
@@ -332,7 +305,7 @@ test "compress/decompress" {
                 }
                 // decompress compressed stream to decompressed stream
                 {
-                    var compressed: std.io.Reader = .fixed(cmp_buf[0..compressed_size]);
+                    var compressed: std.Io.Reader = .fixed(cmp_buf[0..compressed_size]);
                     var decompress: Decompress = .init(&compressed, container, &.{});
                     var decompressed: Writer = .fixed(&dcm_buf);
                     _ = try decompress.reader.streamRemaining(&decompressed);
@@ -344,13 +317,13 @@ test "compress/decompress" {
 }
 
 fn testDecompress(container: Container, compressed: []const u8, expected_plain: []const u8) !void {
-    var in: std.io.Reader = .fixed(compressed);
-    var aw: std.io.Writer.Allocating = .init(testing.allocator);
+    var in: std.Io.Reader = .fixed(compressed);
+    var aw: std.Io.Writer.Allocating = .init(testing.allocator);
     defer aw.deinit();
 
     var decompress: Decompress = .init(&in, container, &.{});
     _ = try decompress.reader.streamRemaining(&aw.writer);
-    try testing.expectEqualSlices(u8, expected_plain, aw.items);
+    try testing.expectEqualSlices(u8, expected_plain, aw.getWritten());
 }
 
 test "don't read past deflate stream's end" {
@@ -483,17 +456,12 @@ test "public interface" {
     var buffer1: [64]u8 = undefined;
     var buffer2: [64]u8 = undefined;
 
-    // TODO These used to be functions, need to migrate the tests
-    const decompress = void;
-    const compress = void;
-    const store = void;
-
     // decompress
     {
         var plain: Writer = .fixed(&buffer2);
-
-        var in: std.io.Reader = .fixed(gzip_data);
-        try decompress(&in, &plain);
+        var in: std.Io.Reader = .fixed(gzip_data);
+        var d: Decompress = .init(&in, .raw, &.{});
+        _ = try d.reader.streamRemaining(&plain);
         try testing.expectEqualSlices(u8, plain_data, plain.buffered());
     }
 
@@ -502,11 +470,13 @@ test "public interface" {
         var plain: Writer = .fixed(&buffer2);
         var compressed: Writer = .fixed(&buffer1);
 
-        var in: std.io.Reader = .fixed(plain_data);
-        try compress(&in, &compressed, .{});
+        var cmp: Compress = .init(&compressed, &.{}, .{});
+        try cmp.writer.writeAll(plain_data);
+        try cmp.end();
 
-        var r: std.io.Reader = .fixed(&buffer1);
-        try decompress(&r, &plain);
+        var r: std.Io.Reader = .fixed(&buffer1);
+        var d: Decompress = .init(&r, .raw, &.{});
+        _ = try d.reader.streamRemaining(&plain);
         try testing.expectEqualSlices(u8, plain_data, plain.buffered());
     }
 
@@ -515,12 +485,11 @@ test "public interface" {
         var plain: Writer = .fixed(&buffer2);
         var compressed: Writer = .fixed(&buffer1);
 
-        var in: std.io.Reader = .fixed(plain_data);
-        var cmp = try Compress(&compressed, .{});
-        try cmp.compress(&in);
-        try cmp.finish();
+        var cmp: Compress = .init(&compressed, &.{}, .{});
+        try cmp.writer.writeAll(plain_data);
+        try cmp.end();
 
-        var r: std.io.Reader = .fixed(&buffer1);
+        var r: std.Io.Reader = .fixed(&buffer1);
         var dcp = Decompress(&r);
         try dcp.decompress(&plain);
         try testing.expectEqualSlices(u8, plain_data, plain.buffered());
@@ -533,11 +502,12 @@ test "public interface" {
             var plain: Writer = .fixed(&buffer2);
             var compressed: Writer = .fixed(&buffer1);
 
-            var in: std.io.Reader = .fixed(plain_data);
-            try huffman.compress(&in, &compressed);
+            var in: std.Io.Reader = .fixed(plain_data);
+            try HuffmanEncoder.compress(&in, &compressed);
 
-            var r: std.io.Reader = .fixed(&buffer1);
-            try decompress(&r, &plain);
+            var r: std.Io.Reader = .fixed(&buffer1);
+            var d: Decompress = .init(&r, .raw, &.{});
+            _ = try d.reader.streamRemaining(&plain);
             try testing.expectEqualSlices(u8, plain_data, plain.buffered());
         }
 
@@ -546,47 +516,50 @@ test "public interface" {
             var plain: Writer = .fixed(&buffer2);
             var compressed: Writer = .fixed(&buffer1);
 
-            var in: std.io.Reader = .fixed(plain_data);
-            var cmp = try huffman.Compressor(&compressed);
+            var in: std.Io.Reader = .fixed(plain_data);
+            var cmp = try HuffmanEncoder.Compressor(&compressed);
             try cmp.compress(&in);
             try cmp.finish();
 
-            var r: std.io.Reader = .fixed(&buffer1);
-            try decompress(&r, &plain);
+            var r: std.Io.Reader = .fixed(&buffer1);
+            var d: Decompress = .init(&r, .raw, &.{});
+            _ = try d.reader.streamRemaining(&plain);
             try testing.expectEqualSlices(u8, plain_data, plain.buffered());
         }
     }
 
-    // store
-    {
-        // store compress/decompress
-        {
-            var plain: Writer = .fixed(&buffer2);
-            var compressed: Writer = .fixed(&buffer1);
-
-            var in: std.io.Reader = .fixed(plain_data);
-            try store.compress(&in, &compressed);
-
-            var r: std.io.Reader = .fixed(&buffer1);
-            try decompress(&r, &plain);
-            try testing.expectEqualSlices(u8, plain_data, plain.buffered());
-        }
-
-        // store compressor/decompressor
-        {
-            var plain: Writer = .fixed(&buffer2);
-            var compressed: Writer = .fixed(&buffer1);
-
-            var in: std.io.Reader = .fixed(plain_data);
-            var cmp = try store.compressor(&compressed);
-            try cmp.compress(&in);
-            try cmp.finish();
-
-            var r: std.io.Reader = .fixed(&buffer1);
-            try decompress(&r, &plain);
-            try testing.expectEqualSlices(u8, plain_data, plain.buffered());
-        }
-    }
+    // TODO
+    //{
+    //    // store compress/decompress
+    //    {
+    //        var plain: Writer = .fixed(&buffer2);
+    //        var compressed: Writer = .fixed(&buffer1);
+
+    //        var in: std.Io.Reader = .fixed(plain_data);
+    //        try store.compress(&in, &compressed);
+
+    //        var r: std.Io.Reader = .fixed(&buffer1);
+    //        var d: Decompress = .init(&r, .raw, &.{});
+    //        _ = try d.reader.streamRemaining(&plain);
+    //        try testing.expectEqualSlices(u8, plain_data, plain.buffered());
+    //    }
+
+    //    // store compressor/decompressor
+    //    {
+    //        var plain: Writer = .fixed(&buffer2);
+    //        var compressed: Writer = .fixed(&buffer1);
+
+    //        var in: std.Io.Reader = .fixed(plain_data);
+    //        var cmp = try store.compressor(&compressed);
+    //        try cmp.compress(&in);
+    //        try cmp.finish();
+
+    //        var r: std.Io.Reader = .fixed(&buffer1);
+    //        var d: Decompress = .init(&r, .raw, &.{});
+    //        _ = try d.reader.streamRemaining(&plain);
+    //        try testing.expectEqualSlices(u8, plain_data, plain.buffered());
+    //    }
+    //}
 }
 
 pub const match = struct {
@@ -615,26 +588,33 @@ test "zlib should not overshoot" {
         0x03, 0x00, 0x8b, 0x61, 0x0f, 0xa4, 0x52, 0x5a, 0x94, 0x12,
     };
 
-    var stream: std.io.Reader = .fixed(&data);
-    const reader = stream.reader();
+    var reader: std.Io.Reader = .fixed(&data);
 
-    var dcp = Decompress.init(reader);
+    var decompress: Decompress = .init(&reader, .zlib, &.{});
     var out: [128]u8 = undefined;
 
-    // Decompress
-    var n = try dcp.reader().readAll(out[0..]);
+    {
+        const n = try decompress.reader.readSliceShort(out[0..]);
 
-    // Expected decompressed data
-    try std.testing.expectEqual(46, n);
-    try std.testing.expectEqualStrings("Copyright Willem van Schaik, Singapore 1995-96", out[0..n]);
+        // Expected decompressed data
+        try std.testing.expectEqual(46, n);
+        try std.testing.expectEqualStrings("Copyright Willem van Schaik, Singapore 1995-96", out[0..n]);
 
-    // Decompressor don't overshoot underlying reader.
-    // It is leaving it at the end of compressed data chunk.
-    try std.testing.expectEqual(data.len - 4, stream.getPos());
-    try std.testing.expectEqual(0, dcp.unreadBytes());
+        // Decompressor don't overshoot underlying reader.
+        // It is leaving it at the end of compressed data chunk.
+        try std.testing.expectEqual(data.len - 4, reader.seek);
+        // TODO what was this testing, exactly?
+        //try std.testing.expectEqual(0, decompress.unreadBytes());
+    }
 
     // 4 bytes after compressed chunk are available in reader.
-    n = try reader.readAll(out[0..]);
+    const n = try reader.readSliceShort(out[0..]);
     try std.testing.expectEqual(n, 4);
     try std.testing.expectEqualSlices(u8, data[data.len - 4 .. data.len], out[0..n]);
 }
+
+test {
+    _ = HuffmanEncoder;
+    _ = Compress;
+    _ = Decompress;
+}