Commit 08bdaf3bd6

Nameless <truemedian@gmail.com>
2023-03-13 18:46:58
std.http: add http server
* extract http protocol into protocol.zig, as it is shared between client and server * coalesce Request and Response back into Client.zig, they don't contain any large chunks of code anymore * http.Server is implemented as simply as possible, a simple example below: ```zig fn handler(res: *Server.Response) !void { while (true) { defer res.reset(); try res.waitForCompleteHead(); res.headers.transfer_encoding = .{ .content_length = 14 }; res.headers.connection = res.request.headers.connection; try res.sendResponseHead(); _ = try res.write("Hello, World!\n"); if (res.connection.closing) break; } } pub fn main() !void { var server = Server.init(std.heap.page_allocator, .{ .reuse_address = true }); defer server.deinit(); try server.listen(try net.Address.parseIp("127.0.0.1", 8080)); while (true) { const res = try server.accept(.{ .dynamic = 8192 }); const thread = try std.Thread.spawn(.{}, handler, .{res}); thread.detach(); } } ```
1 parent fde05b1
lib/std/http/Client/Request.zig
@@ -1,482 +0,0 @@
-const std = @import("std");
-const http = std.http;
-const Uri = std.Uri;
-const mem = std.mem;
-const assert = std.debug.assert;
-
-const Client = @import("../Client.zig");
-const Connection = Client.Connection;
-const ConnectionNode = Client.ConnectionPool.Node;
-const Response = @import("Response.zig");
-
-const Request = @This();
-
-const read_buffer_size = 8192;
-const ReadBufferIndex = std.math.IntFittingRange(0, read_buffer_size);
-
-uri: Uri,
-client: *Client,
-connection: *ConnectionNode,
-response: Response,
-/// These are stored in Request so that they are available when following
-/// redirects.
-headers: Headers,
-
-redirects_left: u32,
-handle_redirects: bool,
-compression_init: bool,
-
-/// Used as a allocator for resolving redirects locations.
-arena: std.heap.ArenaAllocator,
-
-/// Read buffer for the connection. This is used to pull in large amounts of data from the connection even if the user asks for a small amount. This can probably be removed with careful planning.
-read_buffer: [read_buffer_size]u8 = undefined,
-read_buffer_start: ReadBufferIndex = 0,
-read_buffer_len: ReadBufferIndex = 0,
-
-pub const RequestTransfer = union(enum) {
-    content_length: u64,
-    chunked: void,
-    none: void,
-};
-
-pub const Headers = struct {
-    version: http.Version = .@"HTTP/1.1",
-    method: http.Method = .GET,
-    user_agent: []const u8 = "zig (std.http)",
-    connection: http.Connection = .keep_alive,
-    transfer_encoding: RequestTransfer = .none,
-
-    custom: []const http.CustomHeader = &[_]http.CustomHeader{},
-};
-
-pub const Options = struct {
-    handle_redirects: bool = true,
-    max_redirects: u32 = 3,
-    header_strategy: HeaderStrategy = .{ .dynamic = 16 * 1024 },
-
-    pub const HeaderStrategy = union(enum) {
-        /// In this case, the client's Allocator will be used to store the
-        /// entire HTTP header. This value is the maximum total size of
-        /// HTTP headers allowed, otherwise
-        /// error.HttpHeadersExceededSizeLimit is returned from read().
-        dynamic: usize,
-        /// This is used to store the entire HTTP header. If the HTTP
-        /// header is too big to fit, `error.HttpHeadersExceededSizeLimit`
-        /// is returned from read(). When this is used, `error.OutOfMemory`
-        /// cannot be returned from `read()`.
-        static: []u8,
-    };
-};
-
-/// Frees all resources associated with the request.
-pub fn deinit(req: *Request) void {
-    switch (req.response.compression) {
-        .none => {},
-        .deflate => |*deflate| deflate.deinit(),
-        .gzip => |*gzip| gzip.deinit(),
-        .zstd => |*zstd| zstd.deinit(),
-    }
-
-    if (req.response.header_bytes_owned) {
-        req.response.header_bytes.deinit(req.client.allocator);
-    }
-
-    if (!req.response.done) {
-        // If the response wasn't fully read, then we need to close the connection.
-        req.connection.data.closing = true;
-        req.client.connection_pool.release(req.client, req.connection);
-    }
-
-    req.arena.deinit();
-    req.* = undefined;
-}
-
-pub const ReadRawError = Connection.ReadError || Uri.ParseError || Client.RequestError || error{
-    UnexpectedEndOfStream,
-    TooManyHttpRedirects,
-    HttpRedirectMissingLocation,
-    HttpHeadersInvalid,
-};
-
-pub const ReaderRaw = std.io.Reader(*Request, ReadRawError, readRaw);
-
-/// Read from the underlying stream, without decompressing or parsing the headers. Must be called
-/// after waitForCompleteHead() has returned successfully.
-pub fn readRaw(req: *Request, buffer: []u8) ReadRawError!usize {
-    assert(req.response.state.isContent());
-
-    var index: usize = 0;
-    while (index == 0) {
-        const amt = try req.readRawAdvanced(buffer[index..]);
-        if (amt == 0 and req.response.done) break;
-        index += amt;
-    }
-
-    return index;
-}
-
-fn checkForCompleteHead(req: *Request, buffer: []u8) !usize {
-    switch (req.response.state) {
-        .invalid => unreachable,
-        .start, .seen_r, .seen_rn, .seen_rnr => {},
-        else => return 0, // No more headers to read.
-    }
-
-    const i = req.response.findHeadersEnd(buffer[0..]);
-    if (req.response.state == .invalid) return error.HttpHeadersInvalid;
-
-    const headers_data = buffer[0..i];
-    if (req.response.header_bytes.items.len + headers_data.len > req.response.max_header_bytes) {
-        return error.HttpHeadersExceededSizeLimit;
-    }
-    try req.response.header_bytes.appendSlice(req.client.allocator, headers_data);
-
-    if (req.response.state == .finished) {
-        req.response.headers = try Response.Headers.parse(req.response.header_bytes.items);
-
-        if (req.response.headers.upgrade) |_| {
-            req.connection.data.closing = false;
-            req.response.done = true;
-            return i;
-        }
-
-        if (req.response.headers.connection == .keep_alive) {
-            req.connection.data.closing = false;
-        } else {
-            req.connection.data.closing = true;
-        }
-
-        if (req.response.headers.transfer_encoding) |transfer_encoding| {
-            switch (transfer_encoding) {
-                .chunked => {
-                    req.response.next_chunk_length = 0;
-                    req.response.state = .chunk_size;
-                },
-            }
-        } else if (req.response.headers.content_length) |content_length| {
-            req.response.next_chunk_length = content_length;
-
-            if (content_length == 0) req.response.done = true;
-        } else {
-            req.response.done = true;
-        }
-
-        return i;
-    }
-
-    return 0;
-}
-
-pub const WaitForCompleteHeadError = ReadRawError || error{
-    UnexpectedEndOfStream,
-
-    HttpHeadersExceededSizeLimit,
-    ShortHttpStatusLine,
-    BadHttpVersion,
-    HttpHeaderContinuationsUnsupported,
-    HttpTransferEncodingUnsupported,
-    HttpConnectionHeaderUnsupported,
-};
-
-/// Reads a complete response head. Any leftover data is stored in the request. This function is idempotent.
-pub fn waitForCompleteHead(req: *Request) WaitForCompleteHeadError!void {
-    if (req.response.state.isContent()) return;
-
-    while (true) {
-        const nread = try req.connection.data.read(req.read_buffer[0..]);
-        const amt = try checkForCompleteHead(req, req.read_buffer[0..nread]);
-
-        if (amt != 0) {
-            req.read_buffer_start = @intCast(ReadBufferIndex, amt);
-            req.read_buffer_len = @intCast(ReadBufferIndex, nread);
-            return;
-        } else if (nread == 0) {
-            return error.UnexpectedEndOfStream;
-        }
-    }
-}
-
-/// This one can return 0 without meaning EOF.
-fn readRawAdvanced(req: *Request, buffer: []u8) !usize {
-    assert(req.response.state.isContent());
-    if (req.response.done) return 0;
-
-    // var in: []const u8 = undefined;
-    if (req.read_buffer_start == req.read_buffer_len) {
-        const nread = try req.connection.data.read(req.read_buffer[0..]);
-        if (nread == 0) return error.UnexpectedEndOfStream;
-
-        req.read_buffer_start = 0;
-        req.read_buffer_len = @intCast(ReadBufferIndex, nread);
-    }
-
-    var out_index: usize = 0;
-    while (true) {
-        switch (req.response.state) {
-            .invalid, .start, .seen_r, .seen_rn, .seen_rnr => unreachable,
-            .finished => {
-                // TODO https://github.com/ziglang/zig/issues/14039
-                const buf_avail = req.read_buffer_len - req.read_buffer_start;
-                const data_avail = req.response.next_chunk_length;
-                const out_avail = buffer.len;
-
-                if (req.handle_redirects and req.response.headers.status.class() == .redirect) {
-                    const can_read = @intCast(usize, @min(buf_avail, data_avail));
-                    req.response.next_chunk_length -= can_read;
-
-                    if (req.response.next_chunk_length == 0) {
-                        req.client.connection_pool.release(req.client, req.connection);
-                        req.connection = undefined;
-                        req.response.done = true;
-                    }
-
-                    return 0; // skip over as much data as possible
-                }
-
-                const can_read = @intCast(usize, @min(@min(buf_avail, data_avail), out_avail));
-                req.response.next_chunk_length -= can_read;
-
-                mem.copy(u8, buffer[0..], req.read_buffer[req.read_buffer_start..][0..can_read]);
-                req.read_buffer_start += @intCast(ReadBufferIndex, can_read);
-
-                if (req.response.next_chunk_length == 0) {
-                    req.client.connection_pool.release(req.client, req.connection);
-                    req.connection = undefined;
-                    req.response.done = true;
-                }
-
-                return can_read;
-            },
-            .chunk_size_prefix_r => switch (req.read_buffer_len - req.read_buffer_start) {
-                0 => return out_index,
-                1 => switch (req.read_buffer[req.read_buffer_start]) {
-                    '\r' => {
-                        req.response.state = .chunk_size_prefix_n;
-                        return out_index;
-                    },
-                    else => {
-                        req.response.state = .invalid;
-                        return error.HttpHeadersInvalid;
-                    },
-                },
-                else => switch (int16(req.read_buffer[req.read_buffer_start..][0..2])) {
-                    int16("\r\n") => {
-                        req.read_buffer_start += 2;
-                        req.response.state = .chunk_size;
-                        continue;
-                    },
-                    else => {
-                        req.response.state = .invalid;
-                        return error.HttpHeadersInvalid;
-                    },
-                },
-            },
-            .chunk_size_prefix_n => switch (req.read_buffer_len - req.read_buffer_start) {
-                0 => return out_index,
-                else => switch (req.read_buffer[req.read_buffer_start]) {
-                    '\n' => {
-                        req.read_buffer_start += 1;
-                        req.response.state = .chunk_size;
-                        continue;
-                    },
-                    else => {
-                        req.response.state = .invalid;
-                        return error.HttpHeadersInvalid;
-                    },
-                },
-            },
-            .chunk_size, .chunk_r => {
-                const i = req.response.findChunkedLen(req.read_buffer[req.read_buffer_start..req.read_buffer_len]);
-                switch (req.response.state) {
-                    .invalid => return error.HttpHeadersInvalid,
-                    .chunk_data => {
-                        if (req.response.next_chunk_length == 0) {
-                            req.response.done = true;
-                            req.client.connection_pool.release(req.client, req.connection);
-                            req.connection = undefined;
-
-                            return out_index;
-                        }
-
-                        req.read_buffer_start += @intCast(ReadBufferIndex, i);
-                        continue;
-                    },
-                    .chunk_size => return out_index,
-                    else => unreachable,
-                }
-            },
-            .chunk_data => {
-                // TODO https://github.com/ziglang/zig/issues/14039
-                const buf_avail = req.read_buffer_len - req.read_buffer_start;
-                const data_avail = req.response.next_chunk_length;
-                const out_avail = buffer.len - out_index;
-
-                if (req.handle_redirects and req.response.headers.status.class() == .redirect) {
-                    const can_read = @intCast(usize, @min(buf_avail, data_avail));
-                    req.response.next_chunk_length -= can_read;
-
-                    if (req.response.next_chunk_length == 0) {
-                        req.client.connection_pool.release(req.client, req.connection);
-                        req.connection = undefined;
-                        req.response.done = true;
-                        continue;
-                    }
-
-                    return 0; // skip over as much data as possible
-                }
-
-                const can_read = @intCast(usize, @min(@min(buf_avail, data_avail), out_avail));
-                req.response.next_chunk_length -= can_read;
-
-                mem.copy(u8, buffer[out_index..], req.read_buffer[req.read_buffer_start..][0..can_read]);
-                req.read_buffer_start += @intCast(ReadBufferIndex, can_read);
-                out_index += can_read;
-
-                if (req.response.next_chunk_length == 0) {
-                    req.response.state = .chunk_size_prefix_r;
-
-                    continue;
-                }
-
-                return out_index;
-            },
-        }
-    }
-}
-
-pub const ReadError = Client.DeflateDecompressor.Error || Client.GzipDecompressor.Error || Client.ZstdDecompressor.Error || WaitForCompleteHeadError || error{ BadHeader, InvalidCompression, StreamTooLong, InvalidWindowSize, CompressionNotSupported };
-
-pub const Reader = std.io.Reader(*Request, ReadError, read);
-
-pub fn reader(req: *Request) Reader {
-    return .{ .context = req };
-}
-
-pub fn read(req: *Request, buffer: []u8) ReadError!usize {
-    while (true) {
-        if (!req.response.state.isContent()) try req.waitForCompleteHead();
-
-        if (req.handle_redirects and req.response.headers.status.class() == .redirect) {
-            assert(try req.readRaw(buffer) == 0);
-
-            if (req.redirects_left == 0) return error.TooManyHttpRedirects;
-
-            const location = req.response.headers.location orelse
-                return error.HttpRedirectMissingLocation;
-            const new_url = Uri.parse(location) catch try Uri.parseWithoutScheme(location);
-
-            var new_arena = std.heap.ArenaAllocator.init(req.client.allocator);
-            const resolved_url = try req.uri.resolve(new_url, false, new_arena.allocator());
-            errdefer new_arena.deinit();
-
-            req.arena.deinit();
-            req.arena = new_arena;
-
-            const new_req = try req.client.request(resolved_url, req.headers, .{
-                .max_redirects = req.redirects_left - 1,
-                .header_strategy = if (req.response.header_bytes_owned) .{
-                    .dynamic = req.response.max_header_bytes,
-                } else .{
-                    .static = req.response.header_bytes.unusedCapacitySlice(),
-                },
-            });
-            req.deinit();
-            req.* = new_req;
-        } else {
-            break;
-        }
-    }
-
-    if (req.response.compression == .none) {
-        if (req.response.headers.transfer_compression) |compression| {
-            switch (compression) {
-                .compress => return error.CompressionNotSupported,
-                .deflate => req.response.compression = .{
-                    .deflate = try std.compress.zlib.zlibStream(req.client.allocator, ReaderRaw{ .context = req }),
-                },
-                .gzip => req.response.compression = .{
-                    .gzip = try std.compress.gzip.decompress(req.client.allocator, ReaderRaw{ .context = req }),
-                },
-                .zstd => req.response.compression = .{
-                    .zstd = std.compress.zstd.decompressStream(req.client.allocator, ReaderRaw{ .context = req }),
-                },
-            }
-        }
-    }
-
-    return switch (req.response.compression) {
-        .deflate => |*deflate| try deflate.read(buffer),
-        .gzip => |*gzip| try gzip.read(buffer),
-        .zstd => |*zstd| try zstd.read(buffer),
-        else => try req.readRaw(buffer),
-    };
-}
-
-pub fn readAll(req: *Request, buffer: []u8) !usize {
-    var index: usize = 0;
-    while (index < buffer.len) {
-        const amt = try read(req, buffer[index..]);
-        if (amt == 0) break;
-        index += amt;
-    }
-    return index;
-}
-
-pub const WriteError = Connection.WriteError || error{MessageTooLong};
-
-pub const Writer = std.io.Writer(*Request, WriteError, write);
-
-pub fn writer(req: *Request) Writer {
-    return .{ .context = req };
-}
-
-/// Write `bytes` to the server. The `transfer_encoding` request header determines how data will be sent.
-pub fn write(req: *Request, bytes: []const u8) !usize {
-    switch (req.headers.transfer_encoding) {
-        .chunked => {
-            try req.connection.data.writer().print("{x}\r\n", .{bytes.len});
-            try req.connection.data.writeAll(bytes);
-            try req.connection.data.writeAll("\r\n");
-
-            return bytes.len;
-        },
-        .content_length => |*len| {
-            if (len.* < bytes.len) return error.MessageTooLong;
-
-            const amt = try req.connection.data.write(bytes);
-            len.* -= amt;
-            return amt;
-        },
-        .none => return error.NotWriteable,
-    }
-}
-
-/// Finish the body of a request. This notifies the server that you have no more data to send.
-pub fn finish(req: *Request) !void {
-    switch (req.headers.transfer_encoding) {
-        .chunked => try req.connection.data.writeAll("0\r\n"),
-        .content_length => |len| if (len != 0) return error.MessageNotCompleted,
-        .none => {},
-    }
-}
-
-inline fn int16(array: *const [2]u8) u16 {
-    return @bitCast(u16, array.*);
-}
-
-inline fn int32(array: *const [4]u8) u32 {
-    return @bitCast(u32, array.*);
-}
-
-inline fn int64(array: *const [8]u8) u64 {
-    return @bitCast(u64, array.*);
-}
-
-test {
-    const builtin = @import("builtin");
-
-    if (builtin.os.tag == .wasi) return error.SkipZigTest;
-
-    _ = Response;
-}
lib/std/http/Client/Response.zig
@@ -4,6 +4,7 @@ const mem = std.mem;
 const testing = std.testing;
 const assert = std.debug.assert;
 
+const protocol = @import("../protocol.zig");
 const Client = @import("../Client.zig");
 const Response = @This();
 
@@ -169,14 +170,6 @@ pub const Headers = struct {
     }
 };
 
-inline fn int16(array: *const [2]u8) u16 {
-    return @bitCast(u16, array.*);
-}
-
-inline fn int32(array: *const [4]u8) u32 {
-    return @bitCast(u32, array.*);
-}
-
 inline fn int64(array: *const [8]u8) u64 {
     return @bitCast(u64, array.*);
 }
@@ -226,232 +219,6 @@ pub fn initStatic(buf: []u8) Response {
     };
 }
 
-/// Returns how many bytes are part of HTTP headers. Always less than or
-/// equal to bytes.len. If the amount returned is less than bytes.len, it
-/// means the headers ended and the first byte after the double \r\n\r\n is
-/// located at `bytes[result]`.
-pub fn findHeadersEnd(r: *Response, bytes: []const u8) usize {
-    var index: usize = 0;
-
-    // TODO: https://github.com/ziglang/zig/issues/8220
-    state: while (true) {
-        switch (r.state) {
-            .invalid => unreachable,
-            .finished => unreachable,
-            .start => while (true) {
-                switch (bytes.len - index) {
-                    0 => return index,
-                    1 => {
-                        if (bytes[index] == '\r')
-                            r.state = .seen_r;
-                        return index + 1;
-                    },
-                    2 => {
-                        if (int16(bytes[index..][0..2]) == int16("\r\n")) {
-                            r.state = .seen_rn;
-                        } else if (bytes[index + 1] == '\r') {
-                            r.state = .seen_r;
-                        }
-                        return index + 2;
-                    },
-                    3 => {
-                        if (int16(bytes[index..][0..2]) == int16("\r\n") and
-                            bytes[index + 2] == '\r')
-                        {
-                            r.state = .seen_rnr;
-                        } else if (int16(bytes[index + 1 ..][0..2]) == int16("\r\n")) {
-                            r.state = .seen_rn;
-                        } else if (bytes[index + 2] == '\r') {
-                            r.state = .seen_r;
-                        }
-                        return index + 3;
-                    },
-                    4...15 => {
-                        if (int32(bytes[index..][0..4]) == int32("\r\n\r\n")) {
-                            r.state = .finished;
-                            return index + 4;
-                        } else if (int16(bytes[index + 1 ..][0..2]) == int16("\r\n") and
-                            bytes[index + 3] == '\r')
-                        {
-                            r.state = .seen_rnr;
-                            index += 4;
-                            continue :state;
-                        } else if (int16(bytes[index + 2 ..][0..2]) == int16("\r\n")) {
-                            r.state = .seen_rn;
-                            index += 4;
-                            continue :state;
-                        } else if (bytes[index + 3] == '\r') {
-                            r.state = .seen_r;
-                            index += 4;
-                            continue :state;
-                        }
-                        index += 4;
-                        continue;
-                    },
-                    else => {
-                        const chunk = bytes[index..][0..16];
-                        const v: @Vector(16, u8) = chunk.*;
-                        const matches_r = v == @splat(16, @as(u8, '\r'));
-                        const iota = std.simd.iota(u8, 16);
-                        const default = @splat(16, @as(u8, 16));
-                        const sub_index = @reduce(.Min, @select(u8, matches_r, iota, default));
-                        switch (sub_index) {
-                            0...12 => {
-                                index += sub_index + 4;
-                                if (int32(chunk[sub_index..][0..4]) == int32("\r\n\r\n")) {
-                                    r.state = .finished;
-                                    return index;
-                                }
-                                continue;
-                            },
-                            13 => {
-                                index += 16;
-                                if (int16(chunk[14..][0..2]) == int16("\n\r")) {
-                                    r.state = .seen_rnr;
-                                    continue :state;
-                                }
-                                continue;
-                            },
-                            14 => {
-                                index += 16;
-                                if (chunk[15] == '\n') {
-                                    r.state = .seen_rn;
-                                    continue :state;
-                                }
-                                continue;
-                            },
-                            15 => {
-                                r.state = .seen_r;
-                                index += 16;
-                                continue :state;
-                            },
-                            16 => {
-                                index += 16;
-                                continue;
-                            },
-                            else => unreachable,
-                        }
-                    },
-                }
-            },
-
-            .seen_r => switch (bytes.len - index) {
-                0 => return index,
-                1 => {
-                    switch (bytes[index]) {
-                        '\n' => r.state = .seen_rn,
-                        '\r' => r.state = .seen_r,
-                        else => r.state = .start,
-                    }
-                    return index + 1;
-                },
-                2 => {
-                    if (int16(bytes[index..][0..2]) == int16("\n\r")) {
-                        r.state = .seen_rnr;
-                        return index + 2;
-                    }
-                    r.state = .start;
-                    return index + 2;
-                },
-                else => {
-                    if (int16(bytes[index..][0..2]) == int16("\n\r") and
-                        bytes[index + 2] == '\n')
-                    {
-                        r.state = .finished;
-                        return index + 3;
-                    }
-                    index += 3;
-                    r.state = .start;
-                    continue :state;
-                },
-            },
-            .seen_rn => switch (bytes.len - index) {
-                0 => return index,
-                1 => {
-                    switch (bytes[index]) {
-                        '\r' => r.state = .seen_rnr,
-                        else => r.state = .start,
-                    }
-                    return index + 1;
-                },
-                else => {
-                    if (int16(bytes[index..][0..2]) == int16("\r\n")) {
-                        r.state = .finished;
-                        return index + 2;
-                    }
-                    index += 2;
-                    r.state = .start;
-                    continue :state;
-                },
-            },
-            .seen_rnr => switch (bytes.len - index) {
-                0 => return index,
-                else => {
-                    if (bytes[index] == '\n') {
-                        r.state = .finished;
-                        return index + 1;
-                    }
-                    index += 1;
-                    r.state = .start;
-                    continue :state;
-                },
-            },
-            .chunk_size_prefix_r => unreachable,
-            .chunk_size_prefix_n => unreachable,
-            .chunk_size => unreachable,
-            .chunk_r => unreachable,
-            .chunk_data => unreachable,
-        }
-
-        return index;
-    }
-}
-
-pub fn findChunkedLen(r: *Response, bytes: []const u8) usize {
-    var i: usize = 0;
-    if (r.state == .chunk_size) {
-        while (i < bytes.len) : (i += 1) {
-            const digit = switch (bytes[i]) {
-                '0'...'9' => |b| b - '0',
-                'A'...'Z' => |b| b - 'A' + 10,
-                'a'...'z' => |b| b - 'a' + 10,
-                '\r' => {
-                    r.state = .chunk_r;
-                    i += 1;
-                    break;
-                },
-                else => {
-                    r.state = .invalid;
-                    return i;
-                },
-            };
-            const mul = @mulWithOverflow(r.next_chunk_length, 16);
-            if (mul[1] != 0) {
-                r.state = .invalid;
-                return i;
-            }
-            const add = @addWithOverflow(mul[0], digit);
-            if (add[1] != 0) {
-                r.state = .invalid;
-                return i;
-            }
-            r.next_chunk_length = add[0];
-        } else {
-            return i;
-        }
-    }
-    assert(r.state == .chunk_r);
-    if (i == bytes.len) return i;
-
-    if (bytes[i] == '\n') {
-        r.state = .chunk_data;
-        return i + 1;
-    } else {
-        r.state = .invalid;
-        return i;
-    }
-}
-
 fn parseInt3(nnn: @Vector(3, u8)) u10 {
     const zero: @Vector(3, u8) = .{ '0', '0', '0' };
     const mmm: @Vector(3, u10) = .{ 100, 10, 1 };
lib/std/http/Client.zig
@@ -1,23 +1,19 @@
-//! TODO: send connection: keep-alive and LRU cache a configurable number of
-//! open connections to skip DNS and TLS handshake for subsequent requests.
-//!
-//! This API is *not* thread safe.
+//! Connecting and opening requests are threadsafe. Individual requests are not.
 
 const std = @import("../std.zig");
-const mem = std.mem;
-const assert = std.debug.assert;
+const testing = std.testing;
 const http = std.http;
+const mem = std.mem;
 const net = std.net;
-const Client = @This();
 const Uri = std.Uri;
-const Allocator = std.mem.Allocator;
-const testing = std.testing;
+const Allocator = mem.Allocator;
+const assert = std.debug.assert;
 
-pub const Request = @import("Client/Request.zig");
-pub const Response = @import("Client/Response.zig");
+const Client = @This();
+const proto = @import("protocol.zig");
 
 pub const default_connection_pool_size = 32;
-const connection_pool_size = std.options.http_connection_pool_size;
+pub const connection_pool_size = std.options.http_connection_pool_size;
 
 /// Used for tcpConnectToHost and storing HTTP headers when an externally
 /// managed buffer is not provided.
@@ -43,7 +39,7 @@ pub const ConnectionPool = struct {
     used: Queue = .{},
     free: Queue = .{},
     free_len: usize = 0,
-    free_size: usize = default_connection_pool_size,
+    free_size: usize = connection_pool_size,
 
     /// Finds and acquires a connection from the connection pool matching the criteria. This function is threadsafe.
     /// If no connection is found, null is returned.
@@ -55,7 +51,7 @@ pub const ConnectionPool = struct {
         while (next) |node| : (next = node.prev) {
             if ((node.data.protocol == .tls) != criteria.is_tls) continue;
             if (node.data.port != criteria.port) continue;
-            if (std.mem.eql(u8, node.data.host, criteria.host)) continue;
+            if (mem.eql(u8, node.data.host, criteria.host)) continue;
 
             pool.acquireUnsafe(node);
             return node;
@@ -137,9 +133,9 @@ pub const ConnectionPool = struct {
     }
 };
 
-pub const DeflateDecompressor = std.compress.zlib.ZlibStream(Request.ReaderRaw);
-pub const GzipDecompressor = std.compress.gzip.Decompress(Request.ReaderRaw);
-pub const ZstdDecompressor = std.compress.zstd.DecompressStream(Request.ReaderRaw, .{});
+pub const DeflateDecompressor = std.compress.zlib.ZlibStream(Request.TransferReader);
+pub const GzipDecompressor = std.compress.gzip.Decompress(Request.TransferReader);
+pub const ZstdDecompressor = std.compress.zstd.DecompressStream(Request.TransferReader, .{});
 
 pub const Connection = struct {
     stream: net.Stream,
@@ -220,6 +216,379 @@ pub const Connection = struct {
     }
 };
 
+pub const RequestTransfer = union(enum) {
+    content_length: u64,
+    chunked: void,
+    none: void,
+};
+
+pub const Compression = union(enum) {
+    deflate: DeflateDecompressor,
+    gzip: GzipDecompressor,
+    zstd: ZstdDecompressor,
+    none: void,
+};
+
+pub const Response = struct {
+    pub const Headers = struct {
+        status: http.Status,
+        version: http.Version,
+        location: ?[]const u8 = null,
+        content_length: ?u64 = null,
+        transfer_encoding: ?http.TransferEncoding = null,
+        transfer_compression: ?http.ContentEncoding = null,
+        connection: http.Connection = .close,
+        upgrade: ?[]const u8 = null,
+
+        pub const ParseError = error{
+            ShortHttpStatusLine,
+            BadHttpVersion,
+            HttpHeadersInvalid,
+            HttpHeaderContinuationsUnsupported,
+            HttpTransferEncodingUnsupported,
+            HttpConnectionHeaderUnsupported,
+            InvalidCharacter,
+        };
+
+        pub fn parse(bytes: []const u8) !Headers {
+            var it = mem.tokenize(u8, bytes[0 .. bytes.len - 4], "\r\n");
+
+            const first_line = it.next() orelse return error.HttpHeadersInvalid;
+            if (first_line.len < 12)
+                return error.ShortHttpStatusLine;
+
+            const version: http.Version = switch (int64(first_line[0..8])) {
+                int64("HTTP/1.0") => .@"HTTP/1.0",
+                int64("HTTP/1.1") => .@"HTTP/1.1",
+                else => return error.BadHttpVersion,
+            };
+            if (first_line[8] != ' ') return error.HttpHeadersInvalid;
+            const status = @intToEnum(http.Status, parseInt3(first_line[9..12].*));
+
+            var headers: Headers = .{
+                .version = version,
+                .status = status,
+            };
+
+            while (it.next()) |line| {
+                if (line.len == 0) return error.HttpHeadersInvalid;
+                switch (line[0]) {
+                    ' ', '\t' => return error.HttpHeaderContinuationsUnsupported,
+                    else => {},
+                }
+
+                var line_it = mem.tokenize(u8, line, ": ");
+                const header_name = line_it.next() orelse return error.HttpHeadersInvalid;
+                const header_value = line_it.rest();
+                if (std.ascii.eqlIgnoreCase(header_name, "location")) {
+                    if (headers.location != null) return error.HttpHeadersInvalid;
+                    headers.location = header_value;
+                } else if (std.ascii.eqlIgnoreCase(header_name, "content-length")) {
+                    if (headers.content_length != null) return error.HttpHeadersInvalid;
+                    headers.content_length = try std.fmt.parseInt(u64, header_value, 10);
+                } else if (std.ascii.eqlIgnoreCase(header_name, "transfer-encoding")) {
+                    // Transfer-Encoding: second, first
+                    // Transfer-Encoding: deflate, chunked
+                    var iter = mem.splitBackwards(u8, header_value, ",");
+
+                    if (iter.next()) |first| {
+                        const trimmed = mem.trim(u8, first, " ");
+
+                        if (std.meta.stringToEnum(http.TransferEncoding, trimmed)) |te| {
+                            if (headers.transfer_encoding != null) return error.HttpHeadersInvalid;
+                            headers.transfer_encoding = te;
+                        } else if (std.meta.stringToEnum(http.ContentEncoding, trimmed)) |ce| {
+                            if (headers.transfer_compression != null) return error.HttpHeadersInvalid;
+                            headers.transfer_compression = ce;
+                        } else {
+                            return error.HttpTransferEncodingUnsupported;
+                        }
+                    }
+
+                    if (iter.next()) |second| {
+                        if (headers.transfer_compression != null) return error.HttpTransferEncodingUnsupported;
+
+                        const trimmed = mem.trim(u8, second, " ");
+
+                        if (std.meta.stringToEnum(http.ContentEncoding, trimmed)) |ce| {
+                            headers.transfer_compression = ce;
+                        } else {
+                            return error.HttpTransferEncodingUnsupported;
+                        }
+                    }
+
+                    if (iter.next()) |_| return error.HttpTransferEncodingUnsupported;
+                } else if (std.ascii.eqlIgnoreCase(header_name, "content-encoding")) {
+                    if (headers.transfer_compression != null) return error.HttpHeadersInvalid;
+
+                    const trimmed = mem.trim(u8, header_value, " ");
+
+                    if (std.meta.stringToEnum(http.ContentEncoding, trimmed)) |ce| {
+                        headers.transfer_compression = ce;
+                    } else {
+                        return error.HttpTransferEncodingUnsupported;
+                    }
+                } else if (std.ascii.eqlIgnoreCase(header_name, "connection")) {
+                    if (std.ascii.eqlIgnoreCase(header_value, "keep-alive")) {
+                        headers.connection = .keep_alive;
+                    } else if (std.ascii.eqlIgnoreCase(header_value, "close")) {
+                        headers.connection = .close;
+                    } else {
+                        return error.HttpConnectionHeaderUnsupported;
+                    }
+                } else if (std.ascii.eqlIgnoreCase(header_name, "upgrade")) {
+                    headers.upgrade = header_value;
+                }
+            }
+
+            return headers;
+        }
+
+        inline fn int64(array: *const [8]u8) u64 {
+            return @bitCast(u64, array.*);
+        }
+
+        fn parseInt3(nnn: @Vector(3, u8)) u10 {
+            const zero: @Vector(3, u8) = .{ '0', '0', '0' };
+            const mmm: @Vector(3, u10) = .{ 100, 10, 1 };
+            return @reduce(.Add, @as(@Vector(3, u10), nnn -% zero) *% mmm);
+        }
+
+        test parseInt3 {
+            const expectEqual = testing.expectEqual;
+            try expectEqual(@as(u10, 0), parseInt3("000".*));
+            try expectEqual(@as(u10, 418), parseInt3("418".*));
+            try expectEqual(@as(u10, 999), parseInt3("999".*));
+        }
+    };
+
+    headers: Headers = undefined,
+    parser: proto.HeadersParser,
+    compression: Compression = .none,
+    skip: bool = false,
+};
+
+pub const Request = struct {
+    pub const Headers = struct {
+        version: http.Version = .@"HTTP/1.1",
+        method: http.Method = .GET,
+        user_agent: []const u8 = "zig (std.http)",
+        connection: http.Connection = .keep_alive,
+        transfer_encoding: RequestTransfer = .none,
+
+        custom: []const http.CustomHeader = &[_]http.CustomHeader{},
+    };
+
+    uri: Uri,
+    client: *Client,
+    connection: *ConnectionPool.Node,
+    /// These are stored in Request so that they are available when following
+    /// redirects.
+    headers: Headers,
+
+    redirects_left: u32,
+    handle_redirects: bool,
+
+    response: Response,
+
+    /// Used as a allocator for resolving redirects locations.
+    arena: std.heap.ArenaAllocator,
+
+    /// Frees all resources associated with the request.
+    pub fn deinit(req: *Request) void {
+        switch (req.response.compression) {
+            .none => {},
+            .deflate => |*deflate| deflate.deinit(),
+            .gzip => |*gzip| gzip.deinit(),
+            .zstd => |*zstd| zstd.deinit(),
+        }
+
+        if (req.response.parser.header_bytes_owned) {
+            req.response.parser.header_bytes.deinit(req.client.allocator);
+        }
+
+        if (!req.response.parser.done) {
+            // If the response wasn't fully read, then we need to close the connection.
+            req.connection.data.closing = true;
+            req.client.connection_pool.release(req.client, req.connection);
+        }
+
+        req.arena.deinit();
+        req.* = undefined;
+    }
+
+    pub const TransferReadError = Connection.ReadError || proto.HeadersParser.ReadError;
+
+    pub const TransferReader = std.io.Reader(*Request, TransferReadError, transferRead);
+
+    pub fn transferReader(req: *Request) TransferReader {
+        return .{ .context = req };
+    }
+
+    pub fn transferRead(req: *Request, buf: []u8) TransferReadError!usize {
+        if (req.response.parser.isComplete()) return 0;
+
+        var index: usize = 0;
+        while (index == 0) {
+            const amt = try req.response.parser.read(req.connection.data.reader(), buf[index..], req.response.skip);
+            if (amt == 0 and req.response.parser.isComplete()) break;
+            index += amt;
+        }
+
+        return index;
+    }
+
+    pub const WaitForCompleteHeadError = Connection.ReadError || proto.HeadersParser.WaitForCompleteHeadError || Response.Headers.ParseError || error{ BadHeader, InvalidCompression, StreamTooLong, InvalidWindowSize } || error{CompressionNotSupported};
+
+    pub fn waitForCompleteHead(req: *Request) !void {
+        try req.response.parser.waitForCompleteHead(req.connection.data.reader(), req.client.allocator);
+
+        req.response.headers = try Response.Headers.parse(req.response.parser.header_bytes.items);
+
+        if (req.response.headers.status == .switching_protocols) {
+            req.connection.data.closing = false;
+            req.response.parser.done = true;
+        }
+
+        if (req.headers.connection == .keep_alive and req.response.headers.connection == .keep_alive) {
+            req.connection.data.closing = false;
+        } else {
+            req.connection.data.closing = true;
+        }
+
+        if (req.response.headers.transfer_encoding) |te| {
+            switch (te) {
+                .chunked => {
+                    req.response.parser.next_chunk_length = 0;
+                    req.response.parser.state = .chunk_head_size;
+                },
+            }
+        } else if (req.response.headers.content_length) |cl| {
+            req.response.parser.next_chunk_length = cl;
+
+            if (cl == 0) req.response.parser.done = true;
+        } else {
+            req.response.parser.done = true;
+        }
+
+        if (!req.response.parser.done) {
+            if (req.response.headers.transfer_compression) |tc| switch (tc) {
+                .compress => return error.CompressionNotSupported,
+                .deflate => req.response.compression = .{
+                    .deflate = try std.compress.zlib.zlibStream(req.client.allocator, req.transferReader()),
+                },
+                .gzip => req.response.compression = .{
+                    .gzip = try std.compress.gzip.decompress(req.client.allocator, req.transferReader()),
+                },
+                .zstd => req.response.compression = .{
+                    .zstd = std.compress.zstd.decompressStream(req.client.allocator, req.transferReader()),
+                },
+            };
+        }
+
+        if (req.response.headers.status.class() == .redirect and req.handle_redirects) req.response.skip = true;
+    }
+
+    pub const ReadError = RequestError || Client.DeflateDecompressor.Error || Client.GzipDecompressor.Error || Client.ZstdDecompressor.Error || WaitForCompleteHeadError || error{ TooManyHttpRedirects, HttpRedirectMissingLocation, InvalidFormat, InvalidPort, UnexpectedCharacter };
+
+    pub const Reader = std.io.Reader(*Request, ReadError, read);
+
+    pub fn reader(req: *Request) Reader {
+        return .{ .context = req };
+    }
+
+    pub fn read(req: *Request, buffer: []u8) ReadError!usize {
+        while (true) {
+            if (!req.response.parser.state.isContent()) try req.waitForCompleteHead();
+
+            if (req.handle_redirects and req.response.headers.status.class() == .redirect) {
+                assert(try req.transferRead(buffer) == 0);
+
+                if (req.redirects_left == 0) return error.TooManyHttpRedirects;
+
+                const location = req.response.headers.location orelse
+                    return error.HttpRedirectMissingLocation;
+                const new_url = Uri.parse(location) catch try Uri.parseWithoutScheme(location);
+
+                var new_arena = std.heap.ArenaAllocator.init(req.client.allocator);
+                const resolved_url = try req.uri.resolve(new_url, false, new_arena.allocator());
+                errdefer new_arena.deinit();
+
+                req.arena.deinit();
+                req.arena = new_arena;
+
+                const new_req = try req.client.request(resolved_url, req.headers, .{
+                    .max_redirects = req.redirects_left - 1,
+                    .header_strategy = if (req.response.parser.header_bytes_owned) .{
+                        .dynamic = req.response.parser.max_header_bytes,
+                    } else .{
+                        .static = req.response.parser.header_bytes.items.ptr[0..req.response.parser.max_header_bytes],
+                    },
+                });
+                req.deinit();
+                req.* = new_req;
+            } else {
+                break;
+            }
+        }
+
+        return switch (req.response.compression) {
+            .deflate => |*deflate| try deflate.read(buffer),
+            .gzip => |*gzip| try gzip.read(buffer),
+            .zstd => |*zstd| try zstd.read(buffer),
+            else => try req.transferRead(buffer),
+        };
+    }
+
+    pub fn readAll(req: *Request, buffer: []u8) !usize {
+        var index: usize = 0;
+        while (index < buffer.len) {
+            const amt = try read(req, buffer[index..]);
+            if (amt == 0) break;
+            index += amt;
+        }
+        return index;
+    }
+
+    pub const WriteError = Connection.WriteError || error{ NotWriteable, MessageTooLong };
+
+    pub const Writer = std.io.Writer(*Request, WriteError, write);
+
+    pub fn writer(req: *Request) Writer {
+        return .{ .context = req };
+    }
+
+    /// Write `bytes` to the server. The `transfer_encoding` request header determines how data will be sent.
+    pub fn write(req: *Request, bytes: []const u8) WriteError!usize {
+        switch (req.headers.transfer_encoding) {
+            .chunked => {
+                try req.connection.data.writer().print("{x}\r\n", .{bytes.len});
+                try req.connection.data.writeAll(bytes);
+                try req.connection.data.writeAll("\r\n");
+
+                return bytes.len;
+            },
+            .content_length => |*len| {
+                if (len.* < bytes.len) return error.MessageTooLong;
+
+                const amt = try req.connection.data.write(bytes);
+                len.* -= amt;
+                return amt;
+            },
+            .none => return error.NotWriteable,
+        }
+    }
+
+    /// Finish the body of a request. This notifies the server that you have no more data to send.
+    pub fn finish(req: *Request) !void {
+        switch (req.headers.transfer_encoding) {
+            .chunked => try req.connection.data.writeAll("0\r\n"),
+            .content_length => |len| if (len != 0) return error.MessageNotCompleted,
+            .none => {},
+        }
+    }
+};
+
 pub fn deinit(client: *Client) void {
     client.connection_pool.deinit(client);
 
@@ -227,7 +596,7 @@ pub fn deinit(client: *Client) void {
     client.* = undefined;
 }
 
-pub const ConnectError = std.mem.Allocator.Error || net.TcpConnectToHostError || std.crypto.tls.Client.InitError(net.Stream);
+pub const ConnectError = Allocator.Error || net.TcpConnectToHostError || std.crypto.tls.Client.InitError(net.Stream);
 
 pub fn connect(client: *Client, host: []const u8, port: u16, protocol: Connection.Protocol) ConnectError!*ConnectionPool.Node {
     if (client.connection_pool.findConnection(.{
@@ -276,7 +645,26 @@ pub const RequestError = ConnectError || Connection.WriteError || error{
     EndOfStream,
 };
 
-pub fn request(client: *Client, uri: Uri, headers: Request.Headers, options: Request.Options) RequestError!Request {
+pub const Options = struct {
+    handle_redirects: bool = true,
+    max_redirects: u32 = 3,
+    header_strategy: HeaderStrategy = .{ .dynamic = 16 * 1024 },
+
+    pub const HeaderStrategy = union(enum) {
+        /// In this case, the client's Allocator will be used to store the
+        /// entire HTTP header. This value is the maximum total size of
+        /// HTTP headers allowed, otherwise
+        /// error.HttpHeadersExceededSizeLimit is returned from read().
+        dynamic: usize,
+        /// This is used to store the entire HTTP header. If the HTTP
+        /// header is too big to fit, `error.HttpHeadersExceededSizeLimit`
+        /// is returned from read(). When this is used, `error.OutOfMemory`
+        /// cannot be returned from `read()`.
+        static: []u8,
+    };
+};
+
+pub fn request(client: *Client, uri: Uri, headers: Request.Headers, options: Options) RequestError!Request {
     const protocol: Connection.Protocol = if (mem.eql(u8, uri.scheme, "http"))
         .plain
     else if (mem.eql(u8, uri.scheme, "https"))
@@ -304,14 +692,15 @@ pub fn request(client: *Client, uri: Uri, headers: Request.Headers, options: Req
     var req: Request = .{
         .uri = uri,
         .client = client,
-        .headers = headers,
         .connection = try client.connect(host, port, protocol),
+        .headers = headers,
         .redirects_left = options.max_redirects,
         .handle_redirects = options.handle_redirects,
-        .compression_init = false,
-        .response = switch (options.header_strategy) {
-            .dynamic => |max| Response.initDynamic(max),
-            .static => |buf| Response.initStatic(buf),
+        .response = .{
+            .parser = switch (options.header_strategy) {
+                .dynamic => |max| proto.HeadersParser.initDynamic(max),
+                .static => |buf| proto.HeadersParser.initStatic(buf),
+            },
         },
         .arena = undefined,
     };
@@ -358,6 +747,7 @@ pub fn request(client: *Client, uri: Uri, headers: Request.Headers, options: Req
             try writer.writeAll("\r\nConnection: keep-alive");
         }
         try writer.writeAll("\r\nAccept-Encoding: gzip, deflate, zstd");
+        try writer.writeAll("\r\nTE: trailers, gzip, deflate");
 
         switch (headers.transfer_encoding) {
             .chunked => try writer.writeAll("\r\nTransfer-Encoding: chunked"),
lib/std/http/protocol.zig
@@ -0,0 +1,714 @@
+const std = @import("std");
+const testing = std.testing;
+const mem = std.mem;
+
+const assert = std.debug.assert;
+
+pub const State = enum {
+    /// Begin header parsing states.
+    invalid,
+    start,
+    seen_n,
+    seen_r,
+    seen_rn,
+    seen_rnr,
+    finished,
+    /// Begin transfer-encoding: chunked parsing states.
+    chunk_head_size,
+    chunk_head_ext,
+    chunk_head_r,
+    chunk_data,
+    chunk_data_suffix,
+    chunk_data_suffix_r,
+
+    pub fn isContent(self: State) bool {
+        return switch (self) {
+            .invalid, .start, .seen_n, .seen_r, .seen_rn, .seen_rnr => false,
+            .finished, .chunk_head_size, .chunk_head_ext, .chunk_head_r, .chunk_data, .chunk_data_suffix, .chunk_data_suffix_r => true,
+        };
+    }
+};
+
+const read_buffer_size = 0x4000;
+const ReadBufferIndex = std.math.IntFittingRange(0, read_buffer_size);
+
+pub const HeadersParser = struct {
+    state: State = .start,
+    /// Wether or not `header_bytes` is allocated or was provided as a fixed buffer.
+    header_bytes_owned: bool,
+    /// Either a fixed buffer of len `max_header_bytes` or a dynamic buffer that can grow up to `max_header_bytes`.
+    /// Pointers into this buffer are not stable until after a message is complete.
+    header_bytes: std.ArrayListUnmanaged(u8),
+    /// The maximum allowed size of `header_bytes`.
+    max_header_bytes: usize,
+    next_chunk_length: u64 = 0,
+    /// Wether this parser is done parsing a complete message.
+    /// A message is only done when the entire payload has been read
+    done: bool = false,
+
+    read_buffer: [read_buffer_size]u8 = undefined,
+    read_buffer_start: ReadBufferIndex = 0,
+    read_buffer_len: ReadBufferIndex = 0,
+
+    pub fn initDynamic(max: usize) HeadersParser {
+        return .{
+            .header_bytes = .{},
+            .max_header_bytes = max,
+            .header_bytes_owned = true,
+        };
+    }
+
+    pub fn initStatic(buf: []u8) HeadersParser {
+        return .{
+            .header_bytes = .{ .items = buf[0..0], .capacity = buf.len },
+            .max_header_bytes = buf.len,
+            .header_bytes_owned = false,
+        };
+    }
+
+    pub fn reset(r: *HeadersParser) void {
+        r.header_bytes.clearRetainingCapacity();
+
+        r.* = .{
+            .header_bytes = r.header_bytes,
+            .max_header_bytes = r.max_header_bytes,
+            .header_bytes_owned = r.header_bytes_owned,
+        };
+    }
+
+    /// Returns how many bytes are part of HTTP headers. Always less than or
+    /// equal to bytes.len. If the amount returned is less than bytes.len, it
+    /// means the headers ended and the first byte after the double \r\n\r\n is
+    /// located at `bytes[result]`.
+    pub fn findHeadersEnd(r: *HeadersParser, bytes: []const u8) u32 {
+        const vector_len = 16;
+        const len = @truncate(u32, bytes.len);
+        var index: u32 = 0;
+
+        while (true) {
+            switch (r.state) {
+                .invalid => unreachable,
+                .finished => return index,
+                .start => switch (len - index) {
+                    0 => return index,
+                    1 => {
+                        switch (bytes[index]) {
+                            '\r' => r.state = .seen_r,
+                            '\n' => r.state = .seen_n,
+                            else => {},
+                        }
+
+                        return index + 1;
+                    },
+                    2 => {
+                        const b16 = int16(bytes[index..][0..2]);
+                        const b8 = intShift(u8, b16);
+
+                        switch (b8) {
+                            '\r' => r.state = .seen_r,
+                            '\n' => r.state = .seen_n,
+                            else => {},
+                        }
+
+                        switch (b16) {
+                            int16("\r\n") => r.state = .seen_rn,
+                            int16("\n\n") => r.state = .finished,
+                            else => {},
+                        }
+
+                        return index + 2;
+                    },
+                    3 => {
+                        const b24 = int24(bytes[index..][0..3]);
+                        const b16 = intShift(u16, b24);
+                        const b8 = intShift(u8, b24);
+
+                        switch (b8) {
+                            '\r' => r.state = .seen_r,
+                            '\n' => r.state = .seen_n,
+                            else => {},
+                        }
+
+                        switch (b16) {
+                            int16("\r\n") => r.state = .seen_rn,
+                            int16("\n\n") => r.state = .finished,
+                            else => {},
+                        }
+
+                        switch (b24) {
+                            int24("\r\n\r") => r.state = .seen_rnr,
+                            else => {},
+                        }
+
+                        return index + 3;
+                    },
+                    4...vector_len - 1 => {
+                        const b32 = int32(bytes[index..][0..4]);
+                        const b24 = intShift(u24, b32);
+                        const b16 = intShift(u16, b32);
+                        const b8 = intShift(u8, b32);
+
+                        switch (b8) {
+                            '\r' => r.state = .seen_r,
+                            '\n' => r.state = .seen_n,
+                            else => {},
+                        }
+
+                        switch (b16) {
+                            int16("\r\n") => r.state = .seen_rn,
+                            int16("\n\n") => r.state = .finished,
+                            else => {},
+                        }
+
+                        switch (b24) {
+                            int24("\r\n\r") => r.state = .seen_rnr,
+                            else => {},
+                        }
+
+                        switch (b32) {
+                            int32("\r\n\r\n") => r.state = .finished,
+                            else => {},
+                        }
+
+                        index += 4;
+                        continue;
+                    },
+                    else => {
+                        const Vector = @Vector(vector_len, u8);
+                        // const BoolVector = @Vector(vector_len, bool);
+                        const BitVector = @Vector(vector_len, u1);
+                        const SizeVector = @Vector(vector_len, u8);
+
+                        const chunk = bytes[index..][0..vector_len];
+                        const v: Vector = chunk.*;
+                        const matches_r = @bitCast(BitVector, v == @splat(vector_len, @as(u8, '\r')));
+                        const matches_n = @bitCast(BitVector, v == @splat(vector_len, @as(u8, '\n')));
+                        const matches_or: SizeVector = matches_r | matches_n;
+
+                        const matches = @reduce(.Add, matches_or);
+                        switch (matches) {
+                            0 => {},
+                            1 => switch (chunk[vector_len - 1]) {
+                                '\r' => r.state = .seen_r,
+                                '\n' => r.state = .seen_n,
+                                else => {},
+                            },
+                            2 => {
+                                const b16 = int16(chunk[vector_len - 2 ..][0..2]);
+                                const b8 = intShift(u8, b16);
+
+                                switch (b8) {
+                                    '\r' => r.state = .seen_r,
+                                    '\n' => r.state = .seen_n,
+                                    else => {},
+                                }
+
+                                switch (b16) {
+                                    int16("\r\n") => r.state = .seen_rn,
+                                    int16("\n\n") => r.state = .finished,
+                                    else => {},
+                                }
+                            },
+                            3 => {
+                                const b24 = int24(chunk[vector_len - 3 ..][0..3]);
+                                const b16 = intShift(u16, b24);
+                                const b8 = intShift(u8, b24);
+
+                                switch (b8) {
+                                    '\r' => r.state = .seen_r,
+                                    '\n' => r.state = .seen_n,
+                                    else => {},
+                                }
+
+                                switch (b16) {
+                                    int16("\r\n") => r.state = .seen_rn,
+                                    int16("\n\n") => r.state = .finished,
+                                    else => {},
+                                }
+
+                                switch (b24) {
+                                    int24("\r\n\r") => r.state = .seen_rnr,
+                                    else => {},
+                                }
+                            },
+                            4...vector_len - 1 => {
+                                for (0..vector_len - 4) |i_usize| {
+                                    const i = @truncate(u32, i_usize);
+
+                                    const b32 = int32(chunk[i..][0..4]);
+                                    const b16 = intShift(u16, b32);
+
+                                    if (b32 == int32("\r\n\r\n")) {
+                                        r.state = .finished;
+                                        return index + i + 4;
+                                    } else if (b16 == int16("\n\n")) {
+                                        r.state = .finished;
+                                        return index + i + 2;
+                                    }
+                                }
+                            },
+                            else => unreachable,
+                        }
+
+                        index += vector_len;
+                        continue;
+                    },
+                },
+                .seen_n => switch (len - index) {
+                    0 => return index,
+                    else => {
+                        switch (bytes[index]) {
+                            '\n' => r.state = .finished,
+                            else => r.state = .start,
+                        }
+
+                        index += 1;
+                        continue;
+                    },
+                },
+                .seen_r => switch (len - index) {
+                    0 => return index,
+                    1 => {
+                        switch (bytes[index]) {
+                            '\n' => r.state = .seen_rn,
+                            '\r' => r.state = .seen_r,
+                            else => r.state = .start,
+                        }
+
+                        return index + 1;
+                    },
+                    2 => {
+                        const b16 = int16(bytes[index..][0..2]);
+                        const b8 = intShift(u8, b16);
+
+                        switch (b8) {
+                            '\r' => r.state = .seen_r,
+                            '\n' => r.state = .seen_rn,
+                            else => r.state = .start,
+                        }
+
+                        switch (b16) {
+                            int16("\r\n") => r.state = .seen_rn,
+                            int16("\n\n") => r.state = .finished,
+                            else => {},
+                        }
+
+                        return index + 2;
+                    },
+                    else => {
+                        const b24 = int24(bytes[index..][0..3]);
+                        const b16 = intShift(u16, b24);
+                        const b8 = intShift(u8, b24);
+
+                        switch (b8) {
+                            '\r' => r.state = .seen_r,
+                            '\n' => r.state = .seen_n,
+                            else => r.state = .start,
+                        }
+
+                        switch (b16) {
+                            int16("\r\n") => r.state = .seen_rn,
+                            int16("\n\n") => r.state = .finished,
+                            else => {},
+                        }
+
+                        switch (b24) {
+                            int24("\n\r\n") => r.state = .finished,
+                            else => {},
+                        }
+
+                        index += 3;
+                        continue;
+                    },
+                },
+                .seen_rn => switch (len - index) {
+                    0 => return index,
+                    1 => {
+                        switch (bytes[index]) {
+                            '\r' => r.state = .seen_rnr,
+                            '\n' => r.state = .seen_n,
+                            else => r.state = .start,
+                        }
+
+                        return index + 1;
+                    },
+                    else => {
+                        const b16 = int16(bytes[index..][0..2]);
+                        const b8 = intShift(u8, b16);
+
+                        switch (b8) {
+                            '\r' => r.state = .seen_rnr,
+                            '\n' => r.state = .seen_n,
+                            else => r.state = .start,
+                        }
+
+                        switch (b16) {
+                            int16("\r\n") => r.state = .finished,
+                            int16("\n\n") => r.state = .finished,
+                            else => {},
+                        }
+
+                        index += 2;
+                        continue;
+                    },
+                },
+                .seen_rnr => switch (len - index) {
+                    0 => return index,
+                    else => {
+                        switch (bytes[index]) {
+                            '\n' => r.state = .finished,
+                            else => r.state = .start,
+                        }
+
+                        index += 1;
+                        continue;
+                    },
+                },
+                .chunk_head_size => unreachable,
+                .chunk_head_ext => unreachable,
+                .chunk_head_r => unreachable,
+                .chunk_data => unreachable,
+                .chunk_data_suffix => unreachable,
+                .chunk_data_suffix_r => unreachable,
+            }
+
+            return index;
+        }
+    }
+
+    /// Scans `bytes` for chunk framing: the CRLF suffix terminating the previous
+    /// chunk's data, the hexadecimal chunk-size line, and any chunk extension.
+    /// Accumulates the parsed size into `r.next_chunk_length` and advances
+    /// `r.state`. Returns the number of bytes consumed; on malformed input,
+    /// `r.state` becomes `.invalid` and the offset of the offending byte is
+    /// returned.
+    pub fn findChunkedLen(r: *HeadersParser, bytes: []const u8) u32 {
+        const len = @truncate(u32, bytes.len);
+
+        for (bytes[0..], 0..) |c, i| {
+            const index = @intCast(u32, i);
+            switch (r.state) {
+                .chunk_data_suffix => switch (c) {
+                    '\r' => r.state = .chunk_data_suffix_r,
+                    '\n' => r.state = .chunk_head_size,
+                    else => {
+                        r.state = .invalid;
+                        return index;
+                    },
+                },
+                .chunk_data_suffix_r => switch (c) {
+                    '\n' => r.state = .chunk_head_size,
+                    else => {
+                        r.state = .invalid;
+                        return index;
+                    },
+                },
+                .chunk_head_size => {
+                    const digit = switch (c) {
+                        '0'...'9' => |b| b - '0',
+                        // Only A-F/a-f are valid hexadecimal digits. The previous
+                        // A-Z/a-z ranges silently treated e.g. 'g' as the digit
+                        // value 16 and corrupted the chunk length; letters outside
+                        // the hex range now fall through to the extension state
+                        // like any other non-digit byte.
+                        'A'...'F' => |b| b - 'A' + 10,
+                        'a'...'f' => |b| b - 'a' + 10,
+                        '\r' => {
+                            r.state = .chunk_head_r;
+                            continue;
+                        },
+                        '\n' => {
+                            r.state = .chunk_data;
+                            return index + 1;
+                        },
+                        else => {
+                            r.state = .chunk_head_ext;
+                            continue;
+                        },
+                    };
+
+                    // Wrapping ops plus an explicit monotonicity check detect
+                    // overflow of the u64 chunk length.
+                    const new_len = r.next_chunk_length *% 16 +% digit;
+                    if (new_len <= r.next_chunk_length and r.next_chunk_length != 0) {
+                        r.state = .invalid;
+                        return index;
+                    }
+
+                    r.next_chunk_length = new_len;
+                },
+                .chunk_head_ext => switch (c) {
+                    '\r' => r.state = .chunk_head_r,
+                    '\n' => {
+                        r.state = .chunk_data;
+                        return index + 1;
+                    },
+                    else => continue,
+                },
+                .chunk_head_r => switch (c) {
+                    '\n' => {
+                        r.state = .chunk_data;
+                        return index + 1;
+                    },
+                    else => {
+                        r.state = .invalid;
+                        return index;
+                    },
+                },
+                else => unreachable,
+            }
+        }
+
+        return len;
+    }
+
+    /// Whether the parser has consumed a complete message: the entire body has
+    /// been read and any trailing headers have been parsed.
+    pub fn isComplete(r: *HeadersParser) bool {
+        if (!r.done) return false;
+        return r.state == .finished;
+    }
+
+    pub const CheckCompleteHeadError = mem.Allocator.Error || error{HttpHeadersExceededSizeLimit};
+
+    /// Pumps `in` bytes into the parser. Returns the number of bytes consumed. This function will return 0 if the parser
+    /// is not in a state to parse more headers.
+    pub fn checkCompleteHead(r: *HeadersParser, allocator: std.mem.Allocator, in: []const u8) CheckCompleteHeadError!u32 {
+        // Once the parser has reached the body, there are no header bytes left to consume.
+        if (r.state.isContent()) return 0;
+
+        const i = r.findHeadersEnd(in);
+        const data = in[0..i];
+        if (r.header_bytes.items.len + data.len > r.max_header_bytes) {
+            return error.HttpHeadersExceededSizeLimit;
+        } else {
+            // Only a dynamically-owned buffer can grow; a static buffer's capacity
+            // is presumably bounded by max_header_bytes — TODO confirm initStatic
+            // sets max_header_bytes to the static buffer's length.
+            if (r.header_bytes_owned) try r.header_bytes.ensureUnusedCapacity(allocator, data.len);
+
+            r.header_bytes.appendSliceAssumeCapacity(data);
+        }
+
+        return i;
+    }
+
+    /// Set of errors that `waitForCompleteHead` can throw except any errors inherited by `reader`
+    pub const WaitForCompleteHeadError = CheckCompleteHeadError || error{UnexpectedEndOfStream};
+
+    /// Reads from `reader` until the complete header section (or the trailer
+    /// section, when resumed after a chunked body) has been consumed, or an
+    /// error occurs. `allocator` is used to grow a dynamically-owned header
+    /// buffer.
+    pub fn waitForCompleteHead(r: *HeadersParser, reader: anytype, allocator: std.mem.Allocator) !void {
+        if (r.state.isContent()) return;
+
+        while (true) {
+            if (r.read_buffer_start == r.read_buffer_len) {
+                const nread = try reader.read(r.read_buffer[0..]);
+                if (nread == 0) return error.UnexpectedEndOfStream;
+
+                r.read_buffer_start = 0;
+                r.read_buffer_len = @intCast(ReadBufferIndex, nread);
+            }
+
+            const amt = try r.checkCompleteHead(allocator, r.read_buffer[r.read_buffer_start..r.read_buffer_len]);
+            r.read_buffer_start += @intCast(ReadBufferIndex, amt);
+
+            // Previously this returned as soon as any bytes were consumed
+            // (`amt != 0`), which handed back a partial head whenever the header
+            // section spans more than one read from `reader`. Only return once
+            // the parser has actually finished the head and reached content.
+            if (r.state.isContent()) return;
+        }
+    }
+
+    pub const ReadError = error{
+        UnexpectedEndOfStream,
+        HttpHeadersExceededSizeLimit,
+        HttpChunkInvalid,
+    };
+
+    /// Reads the body of the message into `buffer`. If `skip` is true, the buffer will be unused and the body will be
+    /// skipped. Returns the number of bytes placed in the buffer.
+    pub fn read(r: *HeadersParser, reader: anytype, buffer: []u8, skip: bool) !usize {
+        assert(r.state.isContent());
+        if (r.done) return 0;
+
+        // Refill the internal read buffer once it has been fully consumed.
+        if (r.read_buffer_start == r.read_buffer_len) {
+            const nread = try reader.read(r.read_buffer[0..]);
+            if (nread == 0) return error.UnexpectedEndOfStream;
+
+            r.read_buffer_start = 0;
+            r.read_buffer_len = @intCast(ReadBufferIndex, nread);
+        }
+
+        var out_index: usize = 0;
+        while (true) {
+            switch (r.state) {
+                .invalid, .start, .seen_n, .seen_r, .seen_rn, .seen_rnr => unreachable,
+                .finished => {
+                    // Non-chunked body: next_chunk_length holds the number of
+                    // body bytes still expected (seeded by the caller, e.g. from
+                    // a Content-Length header).
+                    const buf_avail = r.read_buffer_len - r.read_buffer_start;
+                    const data_avail = r.next_chunk_length;
+                    const out_avail = buffer.len;
+
+                    // TODO https://github.com/ziglang/zig/issues/14039
+                    const read_available = @intCast(usize, @min(buf_avail, data_avail));
+                    if (skip) {
+                        r.next_chunk_length -= read_available;
+                        r.read_buffer_start += @intCast(ReadBufferIndex, read_available);
+                    } else {
+                        const can_read = @min(read_available, out_avail);
+                        r.next_chunk_length -= can_read;
+
+                        mem.copy(u8, buffer[out_index..], r.read_buffer[r.read_buffer_start..][0..can_read]);
+                        r.read_buffer_start += @intCast(ReadBufferIndex, can_read);
+                        out_index += can_read;
+                    }
+
+                    if (r.next_chunk_length == 0) r.done = true;
+
+                    return out_index;
+                },
+                .chunk_data_suffix, .chunk_data_suffix_r, .chunk_head_size, .chunk_head_ext, .chunk_head_r => {
+                    // Between chunks: parse the chunk framing to learn the next
+                    // chunk's length.
+                    const i = r.findChunkedLen(r.read_buffer[r.read_buffer_start..r.read_buffer_len]);
+                    r.read_buffer_start += @intCast(ReadBufferIndex, i);
+
+                    switch (r.state) {
+                        .invalid => return error.HttpChunkInvalid,
+                        .chunk_data => if (r.next_chunk_length == 0) {
+                            // A zero-length chunk ends the body; what follows is
+                            // an (optional) trailer section.
+                            // The trailer section is formatted identically to the header section.
+                            r.state = .seen_rn;
+                            r.done = true;
+
+                            return out_index;
+                        },
+                        else => return out_index,
+                    }
+
+                    continue;
+                },
+                .chunk_data => {
+                    const buf_avail = r.read_buffer_len - r.read_buffer_start;
+                    const data_avail = r.next_chunk_length;
+                    const out_avail = buffer.len;
+
+                    // TODO https://github.com/ziglang/zig/issues/14039
+                    const read_available = @intCast(usize, @min(buf_avail, data_avail));
+                    if (skip) {
+                        r.next_chunk_length -= read_available;
+                        r.read_buffer_start += @intCast(ReadBufferIndex, read_available);
+                    } else {
+                        const can_read = @min(read_available, out_avail);
+                        r.next_chunk_length -= can_read;
+
+                        mem.copy(u8, buffer[out_index..], r.read_buffer[r.read_buffer_start..][0..can_read]);
+                        r.read_buffer_start += @intCast(ReadBufferIndex, can_read);
+                        out_index += can_read;
+                    }
+
+                    if (r.next_chunk_length == 0) {
+                        // Current chunk fully consumed; expect its CRLF suffix next.
+                        r.state = .chunk_data_suffix;
+                        continue;
+                    }
+
+                    return out_index;
+                },
+            }
+        }
+    }
+};
+
+/// Reinterprets two bytes as a native-endian u16, for multi-byte pattern
+/// matching against constants like int16("\r\n").
+inline fn int16(array: *const [2]u8) u16 {
+    const value: u16 = @bitCast(u16, array.*);
+    return value;
+}
+
+/// Reinterprets three bytes as a native-endian u24.
+inline fn int24(array: *const [3]u8) u24 {
+    const value: u24 = @bitCast(u24, array.*);
+    return value;
+}
+
+/// Reinterprets four bytes as a native-endian u32.
+inline fn int32(array: *const [4]u8) u32 {
+    const value: u32 = @bitCast(u32, array.*);
+    return value;
+}
+
+/// Extracts the trailing bytes of `x` (as produced by the intNN helpers above)
+/// into the smaller integer type `T`, independent of host byte order: on
+/// little-endian hosts the trailing bytes occupy the high bits, on big-endian
+/// hosts the low bits.
+inline fn intShift(comptime T: type, x: anytype) T {
+    const dropped_bits = @bitSizeOf(@TypeOf(x)) - @bitSizeOf(T);
+    if (@import("builtin").cpu.arch.endian() == .Little) {
+        return @truncate(T, x >> dropped_bits);
+    }
+    return @truncate(T, x);
+}
+
+// Splitting the request head at every possible byte offset must consume the
+// same total number of bytes: the head (through "\r\n\r\n") is 35 bytes, and
+// the parser's state carries over between the two calls.
+test "HeadersParser.findHeadersEnd" {
+    var r: HeadersParser = undefined;
+    const data = "GET / HTTP/1.1\r\nHost: localhost\r\n\r\nHello";
+
+    for (0..36) |i| {
+        r = HeadersParser.initDynamic(0);
+        try std.testing.expectEqual(@intCast(u32, i), r.findHeadersEnd(data[0..i]));
+        try std.testing.expectEqual(@intCast(u32, 35 - i), r.findHeadersEnd(data[i..]));
+    }
+}
+
+// Exercises the chunk-size scanner: a plain hex size ("Ff"), a size with an
+// extension ("f0f000 ; ext"), a zero size, and a 40-digit size that must
+// overflow the u64 accumulator into the .invalid state.
+test "HeadersParser.findChunkedLen" {
+    var r: HeadersParser = undefined;
+    const data = "Ff\r\nf0f000 ; ext\n0\r\nffffffffffffffffffffffffffffffffffffffff\r\n";
+
+    r = HeadersParser.initDynamic(0);
+    r.state = .chunk_head_size;
+    r.next_chunk_length = 0;
+
+    const first = r.findChunkedLen(data[0..]);
+    try testing.expectEqual(@as(u32, 4), first);
+    try testing.expectEqual(@as(u64, 0xff), r.next_chunk_length);
+    try testing.expectEqual(State.chunk_data, r.state);
+    r.state = .chunk_head_size;
+    r.next_chunk_length = 0;
+
+    const second = r.findChunkedLen(data[first..]);
+    try testing.expectEqual(@as(u32, 13), second);
+    try testing.expectEqual(@as(u64, 0xf0f000), r.next_chunk_length);
+    try testing.expectEqual(State.chunk_data, r.state);
+    r.state = .chunk_head_size;
+    r.next_chunk_length = 0;
+
+    const third = r.findChunkedLen(data[first + second ..]);
+    try testing.expectEqual(@as(u32, 3), third);
+    try testing.expectEqual(@as(u64, 0), r.next_chunk_length);
+    try testing.expectEqual(State.chunk_data, r.state);
+    r.state = .chunk_head_size;
+    r.next_chunk_length = 0;
+
+    // The scanner stops at the byte where overflow was detected.
+    const fourth = r.findChunkedLen(data[first + second + third ..]);
+    try testing.expectEqual(@as(u32, 16), fourth);
+    try testing.expectEqual(@as(u64, 0xffffffffffffffff), r.next_chunk_length);
+    try testing.expectEqual(State.invalid, r.state);
+}
+
+// Content-Length-framed body: the caller seeds next_chunk_length (here 5)
+// before calling read, mirroring what the client/server wrappers do.
+test "HeadersParser.read length" {
+    var r = HeadersParser.initDynamic(256);
+    defer r.header_bytes.deinit(std.testing.allocator);
+    const data = "GET / HTTP/1.1\r\nHost: localhost\r\nContent-Length: 5\r\n\r\nHello";
+    var fbs = std.io.fixedBufferStream(data);
+
+    try r.waitForCompleteHead(fbs.reader(), std.testing.allocator);
+    var buf: [8]u8 = undefined;
+
+    r.next_chunk_length = 5;
+    const len = try r.read(fbs.reader(), &buf, false);
+    try std.testing.expectEqual(@as(usize, 5), len);
+    try std.testing.expectEqualStrings("Hello", buf[0..len]);
+
+    // The accumulated header bytes include the terminating blank line.
+    try std.testing.expectEqualStrings("GET / HTTP/1.1\r\nHost: localhost\r\nContent-Length: 5\r\n\r\n", r.header_bytes.items);
+}
+
+// Chunked body split across three chunks (2 + 2 + 1 bytes) reassembles to
+// "Hello"; the caller seeds the chunk state machine before reading.
+test "HeadersParser.read chunked" {
+    var r = HeadersParser.initDynamic(256);
+    defer r.header_bytes.deinit(std.testing.allocator);
+    const data = "GET / HTTP/1.1\r\nHost: localhost\r\n\r\n2\r\nHe\r\n2\r\nll\r\n1\r\no\r\n0\r\n\r\n";
+    var fbs = std.io.fixedBufferStream(data);
+
+    try r.waitForCompleteHead(fbs.reader(), std.testing.allocator);
+    var buf: [8]u8 = undefined;
+
+    r.state = .chunk_head_size;
+    const len = try r.read(fbs.reader(), &buf, false);
+    try std.testing.expectEqual(@as(usize, 5), len);
+    try std.testing.expectEqualStrings("Hello", buf[0..len]);
+
+    try std.testing.expectEqualStrings("GET / HTTP/1.1\r\nHost: localhost\r\n\r\n", r.header_bytes.items);
+}
+
+// After the terminal zero-length chunk, trailer headers are parsed by a second
+// waitForCompleteHead call and appended to the accumulated header bytes.
+test "HeadersParser.read chunked trailer" {
+    var r = HeadersParser.initDynamic(256);
+    defer r.header_bytes.deinit(std.testing.allocator);
+    const data = "GET / HTTP/1.1\r\nHost: localhost\r\n\r\n2\r\nHe\r\n2\r\nll\r\n1\r\no\r\n0\r\nContent-Type: text/plain\r\n\r\n";
+    var fbs = std.io.fixedBufferStream(data);
+
+    try r.waitForCompleteHead(fbs.reader(), std.testing.allocator);
+    var buf: [8]u8 = undefined;
+
+    r.state = .chunk_head_size;
+    const len = try r.read(fbs.reader(), &buf, false);
+    try std.testing.expectEqual(@as(usize, 5), len);
+    try std.testing.expectEqualStrings("Hello", buf[0..len]);
+
+    try r.waitForCompleteHead(fbs.reader(), std.testing.allocator);
+
+    try std.testing.expectEqualStrings("GET / HTTP/1.1\r\nHost: localhost\r\n\r\nContent-Type: text/plain\r\n\r\n", r.header_bytes.items);
+}
lib/std/http/Server.zig
@@ -0,0 +1,495 @@
+const std = @import("../std.zig");
+const testing = std.testing;
+const http = std.http;
+const mem = std.mem;
+const net = std.net;
+const Uri = std.Uri;
+const Allocator = mem.Allocator;
+const assert = std.debug.assert;
+
+const Server = @This();
+const proto = @import("protocol.zig");
+
+/// Allocator used for per-connection `Response` objects and for dynamically
+/// grown header storage (see `accept` and `Response.waitForCompleteHead`).
+allocator: Allocator,
+
+/// The listening socket; set up via `listen`, torn down via `deinit`.
+socket: net.StreamServer,
+
+// Request-body decompressor types, instantiated over the response's transfer reader.
+pub const DeflateDecompressor = std.compress.zlib.ZlibStream(Response.TransferReader);
+pub const GzipDecompressor = std.compress.gzip.Decompress(Response.TransferReader);
+pub const ZstdDecompressor = std.compress.zstd.DecompressStream(Response.TransferReader, .{});
+
+pub const Connection = struct {
+    stream: net.Stream,
+    protocol: Protocol,
+
+    /// When true, the connection cannot be reused and is torn down after the
+    /// current request/response cycle.
+    closing: bool = true,
+
+    /// Transport in use. Only plain TCP is implemented; a `.tls` variant is the
+    /// intended extension point (see the commented dispatch arms below).
+    pub const Protocol = enum { plain };
+
+    pub const ReadError = net.Stream.ReadError;
+    pub const Reader = std.io.Reader(*Connection, ReadError, read);
+
+    pub const WriteError = net.Stream.WriteError || error{};
+    pub const Writer = std.io.Writer(*Connection, WriteError, write);
+
+    /// Reads up to `buffer.len` bytes from the peer; returns the count read.
+    pub fn read(conn: *Connection, buffer: []u8) !usize {
+        return switch (conn.protocol) {
+            .plain => conn.stream.read(buffer),
+            // .tls => conn.tls_client.read(conn.stream, buffer),
+        };
+    }
+
+    /// Reads until at least `len` bytes are available or the stream ends.
+    pub fn readAtLeast(conn: *Connection, buffer: []u8, len: usize) !usize {
+        return switch (conn.protocol) {
+            .plain => conn.stream.readAtLeast(buffer, len),
+            // .tls => conn.tls_client.readAtLeast(conn.stream, buffer, len),
+        };
+    }
+
+    pub fn reader(conn: *Connection) Reader {
+        return .{ .context = conn };
+    }
+
+    /// Writes all of `buffer` to the peer.
+    pub fn writeAll(conn: *Connection, buffer: []const u8) !void {
+        return switch (conn.protocol) {
+            .plain => conn.stream.writeAll(buffer),
+            // .tls => conn.tls_client.writeAll(conn.stream, buffer),
+        };
+    }
+
+    /// Writes as much of `buffer` as possible; returns the count written.
+    pub fn write(conn: *Connection, buffer: []const u8) !usize {
+        return switch (conn.protocol) {
+            .plain => conn.stream.write(buffer),
+            // .tls => conn.tls_client.write(conn.stream, buffer),
+        };
+    }
+
+    pub fn writer(conn: *Connection) Writer {
+        return .{ .context = conn };
+    }
+
+    /// Closes the underlying stream.
+    pub fn close(conn: *Connection) void {
+        conn.stream.close();
+    }
+};
+
+pub const Request = struct {
+    /// Parsed representation of an incoming request's head. All slice fields
+    /// reference the raw header bytes held by the parser; nothing is copied.
+    pub const Headers = struct {
+        method: http.Method,
+        target: []const u8,
+        version: http.Version,
+        content_length: ?u64 = null,
+        transfer_encoding: ?http.TransferEncoding = null,
+        transfer_compression: ?http.ContentEncoding = null,
+        // `.close` is the conservative default until a Connection header
+        // explicitly asks for keep-alive.
+        connection: http.Connection = .close,
+        host: ?[]const u8 = null,
+
+        pub const ParseError = error{
+            ShortHttpStatusLine,
+            BadHttpVersion,
+            UnknownHttpMethod,
+            HttpHeadersInvalid,
+            HttpHeaderContinuationsUnsupported,
+            HttpTransferEncodingUnsupported,
+            HttpConnectionHeaderUnsupported,
+            InvalidCharacter,
+        };
+
+        /// Parses a complete request head; `bytes` is expected to end with the
+        /// terminating "\r\n\r\n". Headers other than the ones modeled above
+        /// are ignored.
+        pub fn parse(bytes: []const u8) !Headers {
+            // Drop the terminating blank line and walk the head line by line.
+            var it = mem.tokenize(u8, bytes[0 .. bytes.len - 4], "\r\n");
+
+            const first_line = it.next() orelse return error.HttpHeadersInvalid;
+            if (first_line.len < 10)
+                return error.ShortHttpStatusLine;
+
+            // Request line: METHOD SP TARGET SP VERSION
+            const method_end = mem.indexOfScalar(u8, first_line, ' ') orelse return error.HttpHeadersInvalid;
+            const method_str = first_line[0..method_end];
+            const method = std.meta.stringToEnum(http.Method, method_str) orelse return error.UnknownHttpMethod;
+
+            const version_start = mem.lastIndexOfScalar(u8, first_line, ' ') orelse return error.HttpHeadersInvalid;
+            if (version_start == method_end) return error.HttpHeadersInvalid;
+
+            const version_str = first_line[version_start + 1 ..];
+            if (version_str.len != 8) return error.HttpHeadersInvalid;
+            // Compare all eight version bytes with a single integer comparison.
+            const version: http.Version = switch (int64(version_str[0..8])) {
+                int64("HTTP/1.0") => .@"HTTP/1.0",
+                int64("HTTP/1.1") => .@"HTTP/1.1",
+                else => return error.BadHttpVersion,
+            };
+
+            const target = first_line[method_end + 1 .. version_start];
+
+            var headers: Headers = .{
+                .method = method,
+                .target = target,
+                .version = version,
+            };
+
+            while (it.next()) |line| {
+                if (line.len == 0) return error.HttpHeadersInvalid;
+                switch (line[0]) {
+                    // Obsolete header line folding is rejected outright.
+                    ' ', '\t' => return error.HttpHeaderContinuationsUnsupported,
+                    else => {},
+                }
+
+                // First token is the header name; rest() yields the remainder of
+                // the line (TokenIterator.rest skips leading ':'/' ' delimiter
+                // bytes), i.e. the header value.
+                var line_it = mem.tokenize(u8, line, ": ");
+                const header_name = line_it.next() orelse return error.HttpHeadersInvalid;
+                const header_value = line_it.rest();
+                if (std.ascii.eqlIgnoreCase(header_name, "content-length")) {
+                    if (headers.content_length != null) return error.HttpHeadersInvalid;
+                    headers.content_length = try std.fmt.parseInt(u64, header_value, 10);
+                } else if (std.ascii.eqlIgnoreCase(header_name, "transfer-encoding")) {
+                    // Transfer-Encoding: second, first
+                    // Transfer-Encoding: deflate, chunked
+                    // Walk the list backwards: chunked (if present) must be the
+                    // final coding, and at most one compression coding is allowed.
+                    var iter = mem.splitBackwards(u8, header_value, ",");
+
+                    if (iter.next()) |first| {
+                        const trimmed = mem.trim(u8, first, " ");
+
+                        if (std.meta.stringToEnum(http.TransferEncoding, trimmed)) |te| {
+                            if (headers.transfer_encoding != null) return error.HttpHeadersInvalid;
+                            headers.transfer_encoding = te;
+                        } else if (std.meta.stringToEnum(http.ContentEncoding, trimmed)) |ce| {
+                            if (headers.transfer_compression != null) return error.HttpHeadersInvalid;
+                            headers.transfer_compression = ce;
+                        } else {
+                            return error.HttpTransferEncodingUnsupported;
+                        }
+                    }
+
+                    if (iter.next()) |second| {
+                        if (headers.transfer_compression != null) return error.HttpTransferEncodingUnsupported;
+
+                        const trimmed = mem.trim(u8, second, " ");
+
+                        if (std.meta.stringToEnum(http.ContentEncoding, trimmed)) |ce| {
+                            headers.transfer_compression = ce;
+                        } else {
+                            return error.HttpTransferEncodingUnsupported;
+                        }
+                    }
+
+                    // More than two codings is not supported.
+                    if (iter.next()) |_| return error.HttpTransferEncodingUnsupported;
+                } else if (std.ascii.eqlIgnoreCase(header_name, "content-encoding")) {
+                    if (headers.transfer_compression != null) return error.HttpHeadersInvalid;
+
+                    const trimmed = mem.trim(u8, header_value, " ");
+
+                    if (std.meta.stringToEnum(http.ContentEncoding, trimmed)) |ce| {
+                        headers.transfer_compression = ce;
+                    } else {
+                        return error.HttpTransferEncodingUnsupported;
+                    }
+                } else if (std.ascii.eqlIgnoreCase(header_name, "connection")) {
+                    if (std.ascii.eqlIgnoreCase(header_value, "keep-alive")) {
+                        headers.connection = .keep_alive;
+                    } else if (std.ascii.eqlIgnoreCase(header_value, "close")) {
+                        headers.connection = .close;
+                    } else {
+                        return error.HttpConnectionHeaderUnsupported;
+                    }
+                } else if (std.ascii.eqlIgnoreCase(header_name, "host")) {
+                    headers.host = header_value;
+                }
+            }
+
+            return headers;
+        }
+
+        /// Reinterprets eight bytes as a native-endian u64 for fast comparison.
+        inline fn int64(array: *const [8]u8) u64 {
+            return @bitCast(u64, array.*);
+        }
+    };
+
+    /// Parsed head; only valid after `Response.waitForCompleteHead` succeeds.
+    headers: Headers = undefined,
+    parser: proto.HeadersParser,
+    /// Decompressor state for the request body, if the request declared one.
+    compression: Compression = .none,
+};
+
+pub const Response = struct {
+    /// Head of the outgoing response.
+    pub const Headers = struct {
+        version: http.Version = .@"HTTP/1.1",
+        status: http.Status = .ok,
+        /// Custom reason phrase; when null the standard phrase for `status` is used.
+        reason: ?[]const u8 = null,
+
+        /// Value of the Server header; set to null to omit it.
+        server: ?[]const u8 = "zig (std.http)",
+        connection: http.Connection = .keep_alive,
+        /// How the response body will be framed (Content-Length, chunked, or none).
+        transfer_encoding: RequestTransfer = .none,
+
+        /// Additional headers sent verbatim after the standard ones.
+        custom: []const http.CustomHeader = &[_]http.CustomHeader{},
+    };
+
+    server: *Server,
+    /// Peer address of the accepted connection.
+    address: net.Address,
+    connection: Connection,
+
+    /// Head of the outgoing response; fill in before `sendResponseHead`.
+    headers: Headers = .{},
+    /// State of the incoming request being served.
+    request: Request,
+
+    /// Prepares for the next request on the same connection, or tears the
+    /// connection down when it cannot be reused.
+    pub fn reset(res: *Response) void {
+        switch (res.request.compression) {
+            .none => {},
+            .deflate => |*deflate| deflate.deinit(),
+            .gzip => |*gzip| gzip.deinit(),
+            .zstd => |*zstd| zstd.deinit(),
+        }
+
+        if (!res.request.parser.done) {
+            // If the response wasn't fully read, then we need to close the connection.
+            res.connection.closing = true;
+        }
+
+        if (res.connection.closing) {
+            res.connection.close();
+
+            if (res.request.parser.header_bytes_owned) {
+                res.request.parser.header_bytes.deinit(res.server.allocator);
+            }
+
+            // NOTE(review): the Response itself is created with
+            // `server.allocator.create` in `accept` and is never destroyed
+            // anywhere in this file — confirm intended ownership; as written,
+            // each accepted connection leaks one Response.
+            res.* = undefined;
+        } else {
+            res.request.parser.reset();
+        }
+    }
+
+    /// Serializes and sends the status line and headers, buffered so the head
+    /// goes out in as few stream writes as possible.
+    pub fn sendResponseHead(res: *Response) !void {
+        var buffered = std.io.bufferedWriter(res.connection.writer());
+        const w = buffered.writer();
+
+        try w.writeAll(@tagName(res.headers.version));
+        try w.writeByte(' ');
+        try w.print("{d}", .{@enumToInt(res.headers.status)});
+        try w.writeByte(' ');
+        if (res.headers.reason) |reason| {
+            try w.writeAll(reason);
+        } else if (res.headers.status.phrase()) |phrase| {
+            try w.writeAll(phrase);
+        }
+
+        if (res.headers.server) |server| {
+            try w.writeAll("\r\nServer: ");
+            try w.writeAll(server);
+        }
+
+        if (res.headers.connection == .close) {
+            try w.writeAll("\r\nConnection: close");
+        } else {
+            try w.writeAll("\r\nConnection: keep-alive");
+        }
+
+        switch (res.headers.transfer_encoding) {
+            .chunked => try w.writeAll("\r\nTransfer-Encoding: chunked"),
+            .content_length => |content_length| try w.print("\r\nContent-Length: {d}", .{content_length}),
+            .none => {},
+        }
+
+        for (res.headers.custom) |header| {
+            try w.writeAll("\r\n");
+            try w.writeAll(header.name);
+            try w.writeAll(": ");
+            try w.writeAll(header.value);
+        }
+
+        try w.writeAll("\r\n\r\n");
+
+        try buffered.flush();
+    }
+
+    pub const TransferReadError = Connection.ReadError || proto.HeadersParser.ReadError;
+
+    pub const TransferReader = std.io.Reader(*Response, TransferReadError, transferRead);
+
+    /// Reader over the de-framed (but not decompressed) request body.
+    pub fn transferReader(res: *Response) TransferReader {
+        return .{ .context = res };
+    }
+
+    pub fn transferRead(res: *Response, buf: []u8) TransferReadError!usize {
+        if (res.request.parser.isComplete()) return 0;
+
+        var index: usize = 0;
+        while (index == 0) {
+            // The parser may legitimately return 0 while consuming chunk
+            // framing; loop until we have body bytes or the body is complete.
+            const amt = try res.request.parser.read(res.connection.reader(), buf[index..], false);
+            if (amt == 0 and res.request.parser.isComplete()) break;
+            index += amt;
+        }
+
+        return index;
+    }
+
+    pub const WaitForCompleteHeadError = Connection.ReadError || proto.HeadersParser.WaitForCompleteHeadError || Request.Headers.ParseError || error{ BadHeader, InvalidCompression, StreamTooLong, InvalidWindowSize } || error{CompressionNotSupported};
+
+    /// Reads and parses the complete request head, then prepares the parser
+    /// (and any decompressor) for reading the request body.
+    pub fn waitForCompleteHead(res: *Response) !void {
+        try res.request.parser.waitForCompleteHead(res.connection.reader(), res.server.allocator);
+
+        res.request.headers = try Request.Headers.parse(res.request.parser.header_bytes.items);
+
+        // The connection is reusable only when both sides want keep-alive.
+        if (res.headers.connection == .keep_alive and res.request.headers.connection == .keep_alive) {
+            res.connection.closing = false;
+        } else {
+            res.connection.closing = true;
+        }
+
+        if (res.request.headers.transfer_encoding) |te| {
+            switch (te) {
+                .chunked => {
+                    res.request.parser.next_chunk_length = 0;
+                    res.request.parser.state = .chunk_head_size;
+                },
+            }
+        } else if (res.request.headers.content_length) |cl| {
+            res.request.parser.next_chunk_length = cl;
+
+            if (cl == 0) res.request.parser.done = true;
+        } else {
+            // No framing headers: the request carries no body.
+            res.request.parser.done = true;
+        }
+
+        if (!res.request.parser.done) {
+            if (res.request.headers.transfer_compression) |tc| switch (tc) {
+                .compress => return error.CompressionNotSupported,
+                .deflate => res.request.compression = .{
+                    .deflate = try std.compress.zlib.zlibStream(res.server.allocator, res.transferReader()),
+                },
+                .gzip => res.request.compression = .{
+                    .gzip = try std.compress.gzip.decompress(res.server.allocator, res.transferReader()),
+                },
+                .zstd => res.request.compression = .{
+                    .zstd = std.compress.zstd.decompressStream(res.server.allocator, res.transferReader()),
+                },
+            };
+        }
+    }
+
+    pub const ReadError = DeflateDecompressor.Error || GzipDecompressor.Error || ZstdDecompressor.Error || WaitForCompleteHeadError;
+
+    pub const Reader = std.io.Reader(*Response, ReadError, read);
+
+    /// Reader over the fully decoded (de-framed and decompressed) request body.
+    pub fn reader(res: *Response) Reader {
+        return .{ .context = res };
+    }
+
+    pub fn read(res: *Response, buffer: []u8) ReadError!usize {
+        return switch (res.request.compression) {
+            .deflate => |*deflate| try deflate.read(buffer),
+            .gzip => |*gzip| try gzip.read(buffer),
+            .zstd => |*zstd| try zstd.read(buffer),
+            else => try res.transferRead(buffer),
+        };
+    }
+
+    /// Reads until `buffer` is full or the body ends; returns the count read.
+    pub fn readAll(res: *Response, buffer: []u8) !usize {
+        var index: usize = 0;
+        while (index < buffer.len) {
+            const amt = try read(res, buffer[index..]);
+            if (amt == 0) break;
+            index += amt;
+        }
+        return index;
+    }
+
+    pub const WriteError = Connection.WriteError || error{ NotWriteable, MessageTooLong };
+
+    pub const Writer = std.io.Writer(*Response, WriteError, write);
+
+    pub fn writer(res: *Response) Writer {
+        return .{ .context = res };
+    }
+
+    /// Write `bytes` to the server. The `transfer_encoding` request header determines how data will be sent.
+    pub fn write(res: *Response, bytes: []const u8) WriteError!usize {
+        switch (res.headers.transfer_encoding) {
+            .chunked => {
+                // A zero-sized chunk ("0\r\n\r\n") would be interpreted by the
+                // peer as the end of the body, so empty writes must not emit a
+                // chunk at all.
+                if (bytes.len == 0) return 0;
+
+                try res.connection.writer().print("{x}\r\n", .{bytes.len});
+                try res.connection.writeAll(bytes);
+                try res.connection.writeAll("\r\n");
+
+                return bytes.len;
+            },
+            .content_length => |*len| {
+                if (len.* < bytes.len) return error.MessageTooLong;
+
+                const amt = try res.connection.write(bytes);
+                len.* -= amt;
+                return amt;
+            },
+            .none => return error.NotWriteable,
+        }
+    }
+
+    /// Finish the body of a request. This notifies the server that you have no more data to send.
+    pub fn finish(res: *Response) !void {
+        switch (res.headers.transfer_encoding) {
+            // The terminating chunk must be followed by the CRLF that ends the
+            // (empty) trailer section; sending only "0\r\n" leaves the peer
+            // waiting for trailers forever.
+            .chunked => try res.connection.writeAll("0\r\n\r\n"),
+            .content_length => |len| if (len != 0) return error.MessageNotCompleted,
+            .none => {},
+        }
+    }
+};
+
+/// How the body of a response is framed on the wire (drives both the headers
+/// written by `sendResponseHead` and the behavior of `Response.write`).
+pub const RequestTransfer = union(enum) {
+    /// Fixed-size body; a Content-Length header is sent.
+    content_length: u64,
+    /// HTTP/1.1 chunked transfer encoding.
+    chunked: void,
+    /// No body.
+    none: void,
+};
+
+/// Active decompressor for an incoming request body, if any.
+pub const Compression = union(enum) {
+    deflate: DeflateDecompressor,
+    gzip: GzipDecompressor,
+    zstd: ZstdDecompressor,
+    none: void,
+};
+
+/// Initializes a Server. `allocator` is used for per-connection state (see
+/// `accept`); `options` configures the underlying stream server.
+pub fn init(allocator: Allocator, options: net.StreamServer.Options) Server {
+    const socket = net.StreamServer.init(options);
+    return Server{
+        .allocator = allocator,
+        .socket = socket,
+    };
+}
+
+/// Releases the underlying socket. The server must not be used afterwards.
+pub fn deinit(server: *Server) void {
+    server.socket.deinit();
+}
+
+/// Error set for `listen`. NOTE(review): `listen` currently declares an
+/// inferred error set (`!void`); confirm whether it should be
+/// `ListenError!void` so callers can switch exhaustively.
+pub const ListenError = std.os.SocketError || std.os.BindError || std.os.ListenError || std.os.SetSockOptError || std.os.GetSockNameError;
+
+/// Starts listening for connections on `address`.
+pub fn listen(server: *Server, address: net.Address) !void {
+    try server.socket.listen(address);
+}
+
+pub const AcceptError = net.StreamServer.AcceptError || Allocator.Error;
+
+/// Controls where an accepted connection's request head (request line plus
+/// headers) is stored.
+pub const HeaderStrategy = union(enum) {
+    /// In this case, the server's Allocator will be used to store the
+    /// entire HTTP header. This value is the maximum total size of
+    /// HTTP headers allowed, otherwise
+    /// error.HttpHeadersExceededSizeLimit is returned from read().
+    dynamic: usize,
+    /// This is used to store the entire HTTP header. If the HTTP
+    /// header is too big to fit, `error.HttpHeadersExceededSizeLimit`
+    /// is returned from read(). When this is used, `error.OutOfMemory`
+    /// cannot be returned from `read()`.
+    static: []u8,
+};
+
+/// Accepts a single connection and heap-allocates the `Response` that serves
+/// it (so it can outlive this call, e.g. on another thread). `options` selects
+/// where the request head is stored. NOTE(review): nothing in this file ever
+/// destroys the returned Response — confirm intended ownership.
+pub fn accept(server: *Server, options: HeaderStrategy) AcceptError!*Response {
+    const in = try server.socket.accept();
+    // Do not leak the accepted stream if allocating the Response fails.
+    errdefer in.stream.close();
+
+    const res = try server.allocator.create(Response);
+    res.* = .{
+        .server = server,
+        .address = in.address,
+        .connection = .{
+            .stream = in.stream,
+            .protocol = .plain,
+        },
+        .request = .{
+            .parser = switch (options) {
+                .dynamic => |max| proto.HeadersParser.initDynamic(max),
+                .static => |buf| proto.HeadersParser.initStatic(buf),
+            },
+        },
+    };
+
+    return res;
+}
lib/std/http.zig
@@ -1,4 +1,6 @@
 pub const Client = @import("http/Client.zig");
+pub const Server = @import("http/Server.zig");
+pub const protocol = @import("http/protocol.zig");
 
 pub const Version = enum {
     @"HTTP/1.0",