Commit 4d401e6159
Changed files (8)
lib/std/http/Client.zig
@@ -33,13 +33,14 @@ next_https_rescan_certs: bool = true,
/// The pool of connections that can be reused (and currently in use).
connection_pool: ConnectionPool = .{},
-/// This is the proxy that will handle http:// connections. It *must not* be
-/// modified when the client has any active connections.
-http_proxy: ?Proxy = null,
-
-/// This is the proxy that will handle https:// connections. It *must not* be
-/// modified when the client has any active connections.
-https_proxy: ?Proxy = null,
+/// If populated, all http traffic travels through this third party.
+/// This field cannot be modified while the client has active connections.
+/// Pointer to externally-owned memory.
+http_proxy: ?*Proxy = null,
+/// If populated, all https traffic travels through this third party.
+/// This field cannot be modified while the client has active connections.
+/// Pointer to externally-owned memory.
+https_proxy: ?*Proxy = null,
/// A set of linked lists of connections that can be reused.
pub const ConnectionPool = struct {
@@ -422,7 +423,7 @@ pub const Response = struct {
HttpTransferEncodingUnsupported,
HttpConnectionHeaderUnsupported,
InvalidContentLength,
- CompressionNotSupported,
+ CompressionUnsupported,
};
pub fn parse(res: *Response, bytes: []const u8, trailing: bool) ParseError!void {
@@ -445,8 +446,6 @@ pub const Response = struct {
res.status = status;
res.reason = reason;
- res.headers.clearRetainingCapacity();
-
while (it.next()) |line| {
if (line.len == 0) return error.HttpHeadersInvalid;
switch (line[0]) {
@@ -458,11 +457,17 @@ pub const Response = struct {
const header_name = line_it.next() orelse return error.HttpHeadersInvalid;
const header_value = line_it.rest();
- try res.headers.append(header_name, header_value);
-
if (trailing) continue;
- if (std.ascii.eqlIgnoreCase(header_name, "transfer-encoding")) {
+ if (std.ascii.eqlIgnoreCase(header_name, "connection")) {
+ res.keep_alive = !std.ascii.eqlIgnoreCase(header_value, "close");
+ } else if (std.ascii.eqlIgnoreCase(header_name, "content-type")) {
+ res.content_type = header_value;
+ } else if (std.ascii.eqlIgnoreCase(header_name, "location")) {
+ res.location = header_value;
+ } else if (std.ascii.eqlIgnoreCase(header_name, "content-disposition")) {
+ res.content_disposition = header_value;
+ } else if (std.ascii.eqlIgnoreCase(header_name, "transfer-encoding")) {
// Transfer-Encoding: second, first
// Transfer-Encoding: deflate, chunked
var iter = mem.splitBackwardsScalar(u8, header_value, ',');
@@ -531,15 +536,19 @@ pub const Response = struct {
try expectEqual(@as(u10, 999), parseInt3("999"));
}
- /// The HTTP version this response is using.
version: http.Version,
-
- /// The status code of the response.
status: http.Status,
-
- /// The reason phrase of the response.
reason: []const u8,
+ /// Points into the user-provided `server_header_buffer`.
+ location: ?[]const u8 = null,
+ /// Points into the user-provided `server_header_buffer`.
+ content_type: ?[]const u8 = null,
+ /// Points into the user-provided `server_header_buffer`.
+ content_disposition: ?[]const u8 = null,
+
+ keep_alive: bool = false,
+
/// If present, the number of bytes in the response body.
content_length: ?u64 = null,
@@ -549,12 +558,11 @@ pub const Response = struct {
/// If present, the compression of the response body, otherwise identity (no compression).
transfer_compression: http.ContentEncoding = .identity,
- /// The headers received from the server.
- headers: http.Headers,
parser: proto.HeadersParser,
compression: Compression = .none,
- /// Whether the response body should be skipped. Any data read from the response body will be discarded.
+ /// Whether the response body should be skipped. Any data read from the
+ /// response body will be discarded.
skip: bool = false,
};
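For illustration only (not part of the commit), a minimal sketch of reading these fields after `wait()` returns, assuming `req` is an open `Request` whose `server_header_buffer` is still live:

    if (req.response.content_type) |ct| {
        // Slice into server_header_buffer; copy it if it must outlive the buffer.
        std.debug.print("content-type: {s}\n", .{ct});
    }
    // Reflects the server's Connection header; `wait` also consults the
    // request's own `keep_alive` setting before pooling the connection.
    std.debug.print("server keep-alive: {}\n", .{req.response.keep_alive});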
@@ -562,24 +570,15 @@ pub const Response = struct {
///
/// Order of operations: open -> send[ -> write -> finish] -> wait -> read
pub const Request = struct {
- /// The uri that this request is being sent to.
uri: Uri,
-
- /// The client that this request was created from.
client: *Client,
-
- /// Underlying connection to the server. This is null when the connection is released.
+ /// This is null when the connection is released.
connection: ?*Connection,
+ keep_alive: bool,
method: http.Method,
version: http.Version = .@"HTTP/1.1",
-
- /// The list of HTTP request headers.
- headers: http.Headers,
-
- /// The transfer encoding of the request body.
- transfer_encoding: RequestTransfer = .none,
-
+ transfer_encoding: RequestTransfer,
redirect_behavior: RedirectBehavior,
/// Whether the request should handle a 100-continue response before sending the request body.
@@ -593,6 +592,34 @@ pub const Request = struct {
/// Used as an allocator for resolving redirect locations.
arena: std.heap.ArenaAllocator,
+ /// Standard headers that have default, but overridable, behavior.
+ headers: Headers,
+
+ /// These headers are kept even when following a redirect to a
+ /// different domain.
+ /// Externally-owned; must outlive the Request.
+ extra_headers: []const http.Header,
+
+ /// These headers are stripped when following a redirect to a different
+ /// domain.
+ /// Externally-owned; must outlive the Request.
+ privileged_headers: []const http.Header,
+
+ pub const Headers = struct {
+ host: Value = .default,
+ authorization: Value = .default,
+ user_agent: Value = .default,
+ connection: Value = .default,
+ accept_encoding: Value = .default,
+ content_type: Value = .default,
+
+ pub const Value = union(enum) {
+ default,
+ omit,
+ override: []const u8,
+ };
+ };
+
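A hedged sketch, for illustration only, of the three `Value` variants in use; the token string is a placeholder:

    const headers: std.http.Client.Request.Headers = .{
        // Send no User-Agent header at all.
        .user_agent = .omit,
        // Replace the default Authorization value with a caller-provided one.
        .authorization = .{ .override = "Bearer abc123" },
        // Every other field stays `.default`, so `send` emits its usual value.
    };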
/// Any value other than `not_allowed` or `unhandled` means that integer represents
/// how many remaining redirects are allowed.
pub const RedirectBehavior = enum(u16) {
@@ -621,9 +648,6 @@ pub const Request = struct {
.zstd => |*zstd| zstd.deinit(),
}
- req.headers.deinit();
- req.response.headers.deinit();
-
if (req.connection) |connection| {
if (req.response.parser.state != .complete) {
// If the response wasn't fully read, then we need to close the connection.
@@ -664,14 +688,12 @@ pub const Request = struct {
req.uri = uri;
req.connection = try req.client.connect(host, port, protocol);
req.redirect_behavior.subtractOne();
- req.response.headers.clearRetainingCapacity();
req.response.parser.reset();
req.response = .{
.status = undefined,
.reason = undefined,
.version = undefined,
- .headers = req.response.headers,
.parser = req.response.parser,
};
}
@@ -685,9 +707,11 @@ pub const Request = struct {
/// Send the HTTP request headers to the server.
pub fn send(req: *Request, options: SendOptions) SendError!void {
- if (!req.method.requestHasBody() and req.transfer_encoding != .none) return error.UnsupportedTransferEncoding;
+ if (!req.method.requestHasBody() and req.transfer_encoding != .none)
+ return error.UnsupportedTransferEncoding;
- const w = req.connection.?.writer();
+ const connection = req.connection.?;
+ const w = connection.writer();
try req.method.write(w);
try w.writeByte(' ');
@@ -696,9 +720,9 @@ pub const Request = struct {
try req.uri.writeToStream(.{ .authority = true }, w);
} else {
try req.uri.writeToStream(.{
- .scheme = req.connection.?.proxied,
- .authentication = req.connection.?.proxied,
- .authority = req.connection.?.proxied,
+ .scheme = connection.proxied,
+ .authentication = connection.proxied,
+ .authority = connection.proxied,
.path = true,
.query = true,
.raw = options.raw_uri,
@@ -708,97 +732,91 @@ pub const Request = struct {
try w.writeAll(@tagName(req.version));
try w.writeAll("\r\n");
- if (!req.headers.contains("host")) {
- try w.writeAll("Host: ");
+ if (try emitOverridableHeader("host: ", req.headers.host, w)) {
+ try w.writeAll("host: ");
try req.uri.writeToStream(.{ .authority = true }, w);
try w.writeAll("\r\n");
}
- if ((req.uri.user != null or req.uri.password != null) and
- !req.headers.contains("authorization"))
- {
- try w.writeAll("Authorization: ");
- const authorization = try req.connection.?.allocWriteBuffer(
- @intCast(basic_authorization.valueLengthFromUri(req.uri)),
- );
- std.debug.assert(basic_authorization.value(req.uri, authorization).len == authorization.len);
- try w.writeAll("\r\n");
+ if (try emitOverridableHeader("authorization: ", req.headers.authorization, w)) {
+ if (req.uri.user != null or req.uri.password != null) {
+ try w.writeAll("authorization: ");
+ const authorization = try connection.allocWriteBuffer(
+ @intCast(basic_authorization.valueLengthFromUri(req.uri)),
+ );
+ assert(basic_authorization.value(req.uri, authorization).len == authorization.len);
+ try w.writeAll("\r\n");
+ }
}
- if (!req.headers.contains("user-agent")) {
- try w.writeAll("User-Agent: zig/");
+ if (try emitOverridableHeader("user-agent: ", req.headers.user_agent, w)) {
+ try w.writeAll("user-agent: zig/");
try w.writeAll(builtin.zig_version_string);
try w.writeAll(" (std.http)\r\n");
}
- if (!req.headers.contains("connection")) {
- try w.writeAll("Connection: keep-alive\r\n");
+ if (try emitOverridableHeader("connection: ", req.headers.connection, w)) {
+ if (req.keep_alive) {
+ try w.writeAll("connection: keep-alive\r\n");
+ } else {
+ try w.writeAll("connection: close\r\n");
+ }
}
- if (!req.headers.contains("accept-encoding")) {
- try w.writeAll("Accept-Encoding: gzip, deflate, zstd\r\n");
+ if (try emitOverridableHeader("accept-encoding: ", req.headers.accept_encoding, w)) {
+ try w.writeAll("accept-encoding: gzip, deflate, zstd\r\n");
}
- if (!req.headers.contains("te")) {
- try w.writeAll("TE: gzip, deflate, trailers\r\n");
+ switch (req.transfer_encoding) {
+ .chunked => try w.writeAll("transfer-encoding: chunked\r\n"),
+ .content_length => |len| try w.print("content-length: {d}\r\n", .{len}),
+ .none => {},
}
- const has_transfer_encoding = req.headers.contains("transfer-encoding");
- const has_content_length = req.headers.contains("content-length");
-
- if (!has_transfer_encoding and !has_content_length) {
- switch (req.transfer_encoding) {
- .chunked => try w.writeAll("Transfer-Encoding: chunked\r\n"),
- .content_length => |content_length| try w.print("Content-Length: {d}\r\n", .{content_length}),
- .none => {},
- }
- } else {
- if (has_transfer_encoding) {
- const transfer_encoding = req.headers.getFirstValue("transfer-encoding").?;
- if (std.mem.eql(u8, transfer_encoding, "chunked")) {
- req.transfer_encoding = .chunked;
- } else {
- return error.UnsupportedTransferEncoding;
- }
- } else if (has_content_length) {
- const content_length = std.fmt.parseInt(u64, req.headers.getFirstValue("content-length").?, 10) catch return error.InvalidContentLength;
-
- req.transfer_encoding = .{ .content_length = content_length };
- } else {
- req.transfer_encoding = .none;
- }
+ if (try emitOverridableHeader("content-type: ", req.headers.content_type, w)) {
+ // The default is to omit content-type if not provided because
+ // "application/octet-stream" is redundant.
}
- for (req.headers.list.items) |entry| {
- if (entry.value.len == 0) continue;
+ for (req.extra_headers) |header| {
+ assert(header.value.len != 0);
- try w.writeAll(entry.name);
+ try w.writeAll(header.name);
try w.writeAll(": ");
- try w.writeAll(entry.value);
+ try w.writeAll(header.value);
try w.writeAll("\r\n");
}
- if (req.connection.?.proxied) {
- const proxy_headers: ?http.Headers = switch (req.connection.?.protocol) {
- .plain => if (req.client.http_proxy) |proxy| proxy.headers else null,
- .tls => if (req.client.https_proxy) |proxy| proxy.headers else null,
- };
-
- if (proxy_headers) |headers| {
- for (headers.list.items) |entry| {
- if (entry.value.len == 0) continue;
+ if (connection.proxied) proxy: {
+ const proxy = switch (connection.protocol) {
+ .plain => req.client.http_proxy,
+ .tls => req.client.https_proxy,
+ } orelse break :proxy;
- try w.writeAll(entry.name);
- try w.writeAll(": ");
- try w.writeAll(entry.value);
- try w.writeAll("\r\n");
- }
- }
+ const authorization = proxy.authorization orelse break :proxy;
+ try w.writeAll("proxy-authorization: ");
+ try w.writeAll(authorization);
+ try w.writeAll("\r\n");
}
try w.writeAll("\r\n");
- try req.connection.?.flush();
+ try connection.flush();
+ }
+
+ /// Returns true if the default behavior is required, otherwise handles
+ /// writing (or not writing) the header.
+ fn emitOverridableHeader(prefix: []const u8, v: Headers.Value, w: anytype) !bool {
+ switch (v) {
+ .default => return true,
+ .omit => return false,
+ .override => |x| {
+ try w.writeAll(prefix);
+ try w.writeAll(x);
+ try w.writeAll("\r\n");
+ return false;
+ },
+ }
}
const TransferReadError = Connection.ReadError || proto.HeadersParser.ReadError;
@@ -829,7 +847,7 @@ pub const Request = struct {
RedirectRequiresResend,
HttpRedirectMissingLocation,
CompressionInitializationFailed,
- CompressionNotSupported,
+ CompressionUnsupported,
};
/// Waits for a response from the server and parses any headers that are sent.
@@ -843,12 +861,14 @@ pub const Request = struct {
/// Must be called after `send` and, if any data was written to the request
/// body, then also after `finish`.
pub fn wait(req: *Request) WaitError!void {
+ const connection = req.connection.?;
+
while (true) { // handle redirects
while (true) { // read headers
- try req.connection.?.fill();
+ try connection.fill();
- const nchecked = try req.response.parser.checkCompleteHead(req.connection.?.peek());
- req.connection.?.drop(@intCast(nchecked));
+ const nchecked = try req.response.parser.checkCompleteHead(connection.peek());
+ connection.drop(@intCast(nchecked));
if (req.response.parser.state.isContent()) break;
}
@@ -856,44 +876,36 @@ pub const Request = struct {
try req.response.parse(req.response.parser.get(), false);
if (req.response.status == .@"continue") {
- req.response.parser.state = .complete; // we're done parsing the continue response, reset to prepare for the real response
+ // We're done parsing the continue response; reset to prepare
+ // for the real response.
+ req.response.parser.state = .complete;
req.response.parser.reset();
if (req.handle_continue)
continue;
- return; // we're not handling the 100-continue, return to the caller
+ return; // we're not handling the 100-continue
}
// we're switching protocols, so this connection is no longer doing http
if (req.method == .CONNECT and req.response.status.class() == .success) {
- req.connection.?.closing = false;
+ connection.closing = false;
req.response.parser.state = .complete;
-
- return; // the connection is not HTTP past this point, return to the caller
+ return; // the connection is not HTTP past this point
}
- // we default to using keep-alive if not provided in the client if the server asks for it
- const req_connection = req.headers.getFirstValue("connection");
- const req_keepalive = req_connection != null and !std.ascii.eqlIgnoreCase("close", req_connection.?);
+ connection.closing = !req.response.keep_alive or !req.keep_alive;
- const res_connection = req.response.headers.getFirstValue("connection");
- const res_keepalive = res_connection != null and !std.ascii.eqlIgnoreCase("close", res_connection.?);
- if (res_keepalive and (req_keepalive or req_connection == null)) {
- req.connection.?.closing = false;
- } else {
- req.connection.?.closing = true;
- }
-
- // Any response to a HEAD request and any response with a 1xx (Informational), 204 (No Content), or 304 (Not Modified)
- // status code is always terminated by the first empty line after the header fields, regardless of the header fields
- // present in the message
+ // Any response to a HEAD request and any response with a 1xx
+ // (Informational), 204 (No Content), or 304 (Not Modified) status
+ // code is always terminated by the first empty line after the
+ // header fields, regardless of the header fields present in the
+ // message.
if (req.method == .HEAD or req.response.status.class() == .informational or
req.response.status == .no_content or req.response.status == .not_modified)
{
req.response.parser.state = .complete;
-
- return; // the response is empty, no further setup or redirection is necessary
+ return; // The response is empty; no further setup or redirection is necessary.
}
if (req.response.transfer_encoding != .none) {
@@ -922,7 +934,7 @@ pub const Request = struct {
if (req.redirect_behavior == .not_allowed) return error.TooManyHttpRedirects;
- const location = req.response.headers.getFirstValue("location") orelse
+ const location = req.response.location orelse
return error.HttpRedirectMissingLocation;
const arena = req.arena.allocator();
@@ -932,42 +944,44 @@ pub const Request = struct {
const new_url = Uri.parse(location_duped) catch try Uri.parseWithoutScheme(location_duped);
const resolved_url = try req.uri.resolve(new_url, false, arena);
- // is the redirect location on the same domain, or a subdomain of the original request?
const is_same_domain_or_subdomain =
std.ascii.endsWithIgnoreCase(resolved_url.host.?, req.uri.host.?) and
(resolved_url.host.?.len == req.uri.host.?.len or
resolved_url.host.?[resolved_url.host.?.len - req.uri.host.?.len - 1] == '.');
- if (resolved_url.host == null or !is_same_domain_or_subdomain or !std.ascii.eqlIgnoreCase(resolved_url.scheme, req.uri.scheme)) {
- // we're redirecting to a different domain, strip privileged headers like cookies
- _ = req.headers.delete("authorization");
- _ = req.headers.delete("www-authenticate");
- _ = req.headers.delete("cookie");
- _ = req.headers.delete("cookie2");
+ if (resolved_url.host == null or !is_same_domain_or_subdomain or
+ !std.ascii.eqlIgnoreCase(resolved_url.scheme, req.uri.scheme))
+ {
+ // When redirecting to a different domain, strip privileged headers.
+ req.privileged_headers = &.{};
}
- if (req.response.status == .see_other or ((req.response.status == .moved_permanently or req.response.status == .found) and req.method == .POST)) {
- // we're redirecting to a GET, so we need to change the method and remove the body
+ if (switch (req.response.status) {
+ .see_other => true,
+ .moved_permanently, .found => req.method == .POST,
+ else => false,
+ }) {
+ // A redirect to a GET must change the method and remove the body.
req.method = .GET;
req.transfer_encoding = .none;
- _ = req.headers.delete("transfer-encoding");
- _ = req.headers.delete("content-length");
- _ = req.headers.delete("content-type");
+ req.headers.content_type = .omit;
}
if (req.transfer_encoding != .none) {
- return error.RedirectRequiresResend; // The request body has already been sent. The request is still in a valid state, but the redirect must be handled manually.
+ // The request body has already been sent. The request is
+ // still in a valid state, but the redirect must be handled
+ // manually.
+ return error.RedirectRequiresResend;
}
try req.redirect(resolved_url);
-
try req.send(.{});
} else {
req.response.skip = false;
if (req.response.parser.state != .complete) {
switch (req.response.transfer_compression) {
.identity => req.response.compression = .none,
- .compress, .@"x-compress" => return error.CompressionNotSupported,
+ .compress, .@"x-compress" => return error.CompressionUnsupported,
.deflate => req.response.compression = .{
.deflate = std.compress.zlib.decompressor(req.transferReader()),
},
@@ -1092,16 +1106,12 @@ pub const Request = struct {
}
};
-/// A HTTP proxy server.
pub const Proxy = struct {
- allocator: Allocator,
- headers: http.Headers,
-
protocol: Connection.Protocol,
host: []const u8,
+ authorization: ?[]const u8,
port: u16,
-
- supports_connect: bool = true,
+ supports_connect: bool,
};
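A hedged sketch, for illustration only, of configuring a proxy by hand under the new pointer-based field; the host and port are placeholders, and the `Proxy` value must outlive the client's use of it:

    var my_proxy: std.http.Client.Proxy = .{
        .protocol = .plain,
        .host = "proxy.example.com",
        .authorization = null, // or a pre-encoded "Basic ..." value
        .port = 3128,
        .supports_connect = true,
    };
    client.http_proxy = &my_proxy;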
/// Release all associated resources with the client.
@@ -1113,116 +1123,71 @@ pub fn deinit(client: *Client) void {
client.connection_pool.deinit(client.allocator);
- if (client.http_proxy) |*proxy| {
- proxy.allocator.free(proxy.host);
- proxy.headers.deinit();
- }
-
- if (client.https_proxy) |*proxy| {
- proxy.allocator.free(proxy.host);
- proxy.headers.deinit();
- }
-
if (!disable_tls)
client.ca_bundle.deinit(client.allocator);
client.* = undefined;
}
-/// Uses the *_proxy environment variable to set any unset proxies for the client.
-/// This function *must not* be called when the client has any active connections.
-pub fn loadDefaultProxies(client: *Client) !void {
+/// Populates `http_proxy` and `https_proxy` via standard proxy environment variables.
+/// Asserts the client has no active connections.
+/// Uses `arena` for a few small allocations that must outlive the client, or
+/// at least until those fields are set to different values.
+pub fn initDefaultProxies(client: *Client, arena: Allocator) !void {
// Prevent any new connections from being created.
client.connection_pool.mutex.lock();
defer client.connection_pool.mutex.unlock();
- assert(client.connection_pool.used.first == null); // There are still active requests.
+ assert(client.connection_pool.used.first == null); // There are active requests.
- if (client.http_proxy == null) http: {
- const content: []const u8 = if (std.process.hasEnvVarConstant("http_proxy"))
- try std.process.getEnvVarOwned(client.allocator, "http_proxy")
- else if (std.process.hasEnvVarConstant("HTTP_PROXY"))
- try std.process.getEnvVarOwned(client.allocator, "HTTP_PROXY")
- else if (std.process.hasEnvVarConstant("all_proxy"))
- try std.process.getEnvVarOwned(client.allocator, "all_proxy")
- else if (std.process.hasEnvVarConstant("ALL_PROXY"))
- try std.process.getEnvVarOwned(client.allocator, "ALL_PROXY")
- else
- break :http;
- defer client.allocator.free(content);
-
- const uri = Uri.parse(content) catch
- Uri.parseWithoutScheme(content) catch
- break :http;
-
- const protocol = if (uri.scheme.len == 0)
- .plain // No scheme, assume http://
- else
- protocol_map.get(uri.scheme) orelse break :http; // Unknown scheme, ignore
-
- const host = if (uri.host) |host| try client.allocator.dupe(u8, host) else break :http; // Missing host, ignore
- client.http_proxy = .{
- .allocator = client.allocator,
- .headers = .{ .allocator = client.allocator },
-
- .protocol = protocol,
- .host = host,
- .port = uri.port orelse switch (protocol) {
- .plain => 80,
- .tls => 443,
- },
- };
+ if (client.http_proxy == null) {
+ client.http_proxy = try createProxyFromEnvVar(arena, &.{
+ "http_proxy", "HTTP_PROXY", "all_proxy", "ALL_PROXY",
+ });
+ }
- if (uri.user != null or uri.password != null) {
- const authorization = try client.allocator.alloc(u8, basic_authorization.valueLengthFromUri(uri));
- errdefer client.allocator.free(authorization);
- std.debug.assert(basic_authorization.value(uri, authorization).len == authorization.len);
- try client.http_proxy.?.headers.appendOwned(.{ .unowned = "proxy-authorization" }, .{ .owned = authorization });
- }
+ if (client.https_proxy == null) {
+ client.https_proxy = try createProxyFromEnvVar(arena, &.{
+ "https_proxy", "HTTPS_PROXY", "all_proxy", "ALL_PROXY",
+ });
}
+}
- if (client.https_proxy == null) https: {
- const content: []const u8 = if (std.process.hasEnvVarConstant("https_proxy"))
- try std.process.getEnvVarOwned(client.allocator, "https_proxy")
- else if (std.process.hasEnvVarConstant("HTTPS_PROXY"))
- try std.process.getEnvVarOwned(client.allocator, "HTTPS_PROXY")
- else if (std.process.hasEnvVarConstant("all_proxy"))
- try std.process.getEnvVarOwned(client.allocator, "all_proxy")
- else if (std.process.hasEnvVarConstant("ALL_PROXY"))
- try std.process.getEnvVarOwned(client.allocator, "ALL_PROXY")
- else
- break :https;
- defer client.allocator.free(content);
-
- const uri = Uri.parse(content) catch
- Uri.parseWithoutScheme(content) catch
- break :https;
-
- const protocol = if (uri.scheme.len == 0)
- .plain // No scheme, assume http://
- else
- protocol_map.get(uri.scheme) orelse break :https; // Unknown scheme, ignore
-
- const host = if (uri.host) |host| try client.allocator.dupe(u8, host) else break :https; // Missing host, ignore
- client.https_proxy = .{
- .allocator = client.allocator,
- .headers = .{ .allocator = client.allocator },
-
- .protocol = protocol,
- .host = host,
- .port = uri.port orelse switch (protocol) {
- .plain => 80,
- .tls => 443,
- },
+fn createProxyFromEnvVar(arena: Allocator, env_var_names: []const []const u8) !?*Proxy {
+ const content = for (env_var_names) |name| {
+ break std.process.getEnvVarOwned(arena, name) catch |err| switch (err) {
+ error.EnvironmentVariableNotFound => continue,
+ else => |e| return e,
};
+ } else return null;
- if (uri.user != null or uri.password != null) {
- const authorization = try client.allocator.alloc(u8, basic_authorization.valueLengthFromUri(uri));
- errdefer client.allocator.free(authorization);
- std.debug.assert(basic_authorization.value(uri, authorization).len == authorization.len);
- try client.https_proxy.?.headers.appendOwned(.{ .unowned = "proxy-authorization" }, .{ .owned = authorization });
- }
- }
+ const uri = Uri.parse(content) catch try Uri.parseWithoutScheme(content);
+
+ const protocol = if (uri.scheme.len == 0)
+ .plain // No scheme, assume http://
+ else
+ protocol_map.get(uri.scheme) orelse return null; // Unknown scheme, ignore
+
+ const host = uri.host orelse return error.HttpProxyMissingHost;
+
+ const authorization: ?[]const u8 = if (uri.user != null or uri.password != null) a: {
+ const authorization = try arena.alloc(u8, basic_authorization.valueLengthFromUri(uri));
+ assert(basic_authorization.value(uri, authorization).len == authorization.len);
+ break :a authorization;
+ } else null;
+
+ const proxy = try arena.create(Proxy);
+ proxy.* = .{
+ .protocol = protocol,
+ .host = host,
+ .authorization = authorization,
+ .port = uri.port orelse switch (protocol) {
+ .plain => 80,
+ .tls => 443,
+ },
+ .supports_connect = true,
+ };
+ return proxy;
}
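For illustration only (not part of the commit), a minimal sketch of the renamed entry point, assuming `gpa` and `client` are in scope; the arena must outlive the proxy fields it populates:

    var arena_state = std.heap.ArenaAllocator.init(gpa);
    defer arena_state.deinit();
    try client.initDefaultProxies(arena_state.allocator());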
pub const basic_authorization = struct {
@@ -1244,8 +1209,8 @@ pub const basic_authorization = struct {
}
pub fn value(uri: Uri, out: []u8) []u8 {
- std.debug.assert(uri.user == null or uri.user.?.len <= max_user_len);
- std.debug.assert(uri.password == null or uri.password.?.len <= max_password_len);
+ assert(uri.user == null or uri.user.?.len <= max_user_len);
+ assert(uri.password == null or uri.password.?.len <= max_password_len);
@memcpy(out[0..prefix.len], prefix);
@@ -1356,7 +1321,8 @@ pub fn connectUnix(client: *Client, path: []const u8) ConnectUnixError!*Connecti
return &conn.data;
}
-/// Connect to `tunnel_host:tunnel_port` using the specified proxy with HTTP CONNECT. This will reuse a connection if one is already open.
+/// Connect to `tunnel_host:tunnel_port` using the specified proxy with HTTP
+/// CONNECT. This will reuse a connection if one is already open.
///
/// This function is threadsafe.
pub fn connectTunnel(
@@ -1394,7 +1360,7 @@ pub fn connectTunnel(
};
var buffer: [8096]u8 = undefined;
- var req = client.open(.CONNECT, uri, proxy.headers, .{
+ var req = client.open(.CONNECT, uri, .{
.redirect_behavior = .unhandled,
.connection = conn,
.server_header_buffer = &buffer,
@@ -1436,42 +1402,44 @@ pub fn connectTunnel(
const ConnectErrorPartial = ConnectTcpError || error{ UnsupportedUrlScheme, ConnectionRefused };
pub const ConnectError = ConnectErrorPartial || RequestError;
-/// Connect to `host:port` using the specified protocol. This will reuse a connection if one is already open.
-/// If a proxy is configured for the client, then the proxy will be used to connect to the host.
+/// Connect to `host:port` using the specified protocol. This will reuse a
+/// connection if one is already open.
+/// If a proxy is configured for the client, then the proxy will be used to
+/// connect to the host.
///
/// This function is threadsafe.
-pub fn connect(client: *Client, host: []const u8, port: u16, protocol: Connection.Protocol) ConnectError!*Connection {
- // pointer required so that `supports_connect` can be updated if a CONNECT fails
- const potential_proxy: ?*Proxy = switch (protocol) {
- .plain => if (client.http_proxy) |*proxy_info| proxy_info else null,
- .tls => if (client.https_proxy) |*proxy_info| proxy_info else null,
- };
-
- if (potential_proxy) |proxy| {
- // don't attempt to proxy the proxy thru itself.
- if (std.mem.eql(u8, proxy.host, host) and proxy.port == port and proxy.protocol == protocol) {
- return client.connectTcp(host, port, protocol);
- }
-
- if (proxy.supports_connect) tunnel: {
- return connectTunnel(client, proxy, host, port) catch |err| switch (err) {
- error.TunnelNotSupported => break :tunnel,
- else => |e| return e,
- };
- }
+pub fn connect(
+ client: *Client,
+ host: []const u8,
+ port: u16,
+ protocol: Connection.Protocol,
+) ConnectError!*Connection {
+ const proxy = switch (protocol) {
+ .plain => client.http_proxy,
+ .tls => client.https_proxy,
+ } orelse return client.connectTcp(host, port, protocol);
+
+ // Prevent proxying through itself.
+ if (std.mem.eql(u8, proxy.host, host) and proxy.port == port and proxy.protocol == protocol) {
+ return client.connectTcp(host, port, protocol);
+ }
- // fall back to using the proxy as a normal http proxy
- const conn = try client.connectTcp(proxy.host, proxy.port, proxy.protocol);
- errdefer {
- conn.closing = true;
- client.connection_pool.release(conn);
- }
+ if (proxy.supports_connect) tunnel: {
+ return connectTunnel(client, proxy, host, port) catch |err| switch (err) {
+ error.TunnelNotSupported => break :tunnel,
+ else => |e| return e,
+ };
+ }
- conn.proxied = true;
- return conn;
+ // fall back to using the proxy as a normal http proxy
+ const conn = try client.connectTcp(proxy.host, proxy.port, proxy.protocol);
+ errdefer {
+ conn.closing = true;
+ client.connection_pool.release(conn);
}
- return client.connectTcp(host, port, protocol);
+ conn.proxied = true;
+ return conn;
}
pub const RequestError = ConnectTcpError || ConnectErrorPartial || Request.SendError ||
@@ -1496,6 +1464,10 @@ pub const RequestOptions = struct {
/// you finish the request, then the request *will* deadlock.
handle_continue: bool = true,
+ /// If false, close the connection after the one request. If true,
+ /// participate in the client connection pool.
+ keep_alive: bool = true,
+
/// This field specifies whether to automatically follow redirects, and if
/// so, how many redirects to follow before returning an error.
///
@@ -1510,6 +1482,17 @@ pub const RequestOptions = struct {
/// Must be an already acquired connection.
connection: ?*Connection = null,
+
+ /// Standard headers that have default, but overridable, behavior.
+ headers: Request.Headers = .{},
+ /// These headers are kept even when following a redirect to a
+ /// different domain.
+ /// Externally-owned; must outlive the Request.
+ extra_headers: []const http.Header = &.{},
+ /// These headers are stripped when following a redirect to a different
+ /// domain.
+ /// Externally-owned; must outlive the Request.
+ privileged_headers: []const http.Header = &.{},
};
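For illustration only (not part of the commit), a minimal end-to-end sketch of the new options, assuming `client` and `uri` are in scope; the buffer size and header values are placeholders:

    var buf: [16 * 1024]u8 = undefined;
    var req = try client.open(.GET, uri, .{
        .server_header_buffer = &buf,
        .keep_alive = false, // close after this request instead of pooling
        .headers = .{ .user_agent = .omit },
        .extra_headers = &.{
            .{ .name = "accept", .value = "application/json" },
        },
    });
    defer req.deinit();
    try req.send(.{});
    try req.wait();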
pub const protocol_map = std.ComptimeStringMap(Connection.Protocol, .{
@@ -1522,7 +1505,6 @@ pub const protocol_map = std.ComptimeStringMap(Connection.Protocol, .{
/// Open a connection to the host specified by `uri` and prepare to send an HTTP request.
///
/// `uri` must remain alive during the entire request.
-/// `headers` is cloned and may be freed after this function returns.
///
/// The caller is responsible for calling `deinit()` on the `Request`.
/// This function is threadsafe.
@@ -1530,7 +1512,6 @@ pub fn open(
client: *Client,
method: http.Method,
uri: Uri,
- headers: http.Headers,
options: RequestOptions,
) RequestError!Request {
const protocol = protocol_map.get(uri.scheme) orelse return error.UnsupportedUrlScheme;
@@ -1560,19 +1541,22 @@ pub fn open(
.uri = uri,
.client = client,
.connection = conn,
- .headers = try headers.clone(client.allocator), // Headers must be cloned to properly handle header transformations in redirects.
+ .keep_alive = options.keep_alive,
.method = method,
.version = options.version,
+ .transfer_encoding = .none,
.redirect_behavior = options.redirect_behavior,
.handle_continue = options.handle_continue,
.response = .{
.status = undefined,
.reason = undefined,
.version = undefined,
- .headers = http.Headers{ .allocator = client.allocator, .owned = false },
.parser = proto.HeadersParser.init(options.server_header_buffer),
},
.arena = undefined,
+ .headers = options.headers,
+ .extra_headers = options.extra_headers,
+ .privileged_headers = options.privileged_headers,
};
errdefer req.deinit();
@@ -1618,25 +1602,34 @@ pub const FetchOptions = struct {
location: Location,
method: http.Method = .GET,
- headers: http.Headers = .{ .allocator = std.heap.page_allocator, .owned = false },
payload: Payload = .none,
raw_uri: bool = false,
+
+ /// Standard headers that have default, but overridable, behavior.
+ headers: Request.Headers = .{},
+ /// These headers are kept even when following a redirect to a
+ /// different domain.
+ /// Externally-owned; must outlive the Request.
+ extra_headers: []const http.Header = &.{},
+ /// These headers are stripped when following a redirect to a different
+ /// domain.
+ /// Externally-owned; must outlive the Request.
+ privileged_headers: []const http.Header = &.{},
};
pub const FetchResult = struct {
status: http.Status,
body: ?[]const u8 = null,
- headers: http.Headers,
allocator: Allocator,
options: FetchOptions,
pub fn deinit(res: *FetchResult) void {
- if (res.options.response_strategy == .storage and res.options.response_strategy.storage == .dynamic) {
+ if (res.options.response_strategy == .storage and
+ res.options.response_strategy.storage == .dynamic)
+ {
if (res.body) |body| res.allocator.free(body);
}
-
- res.headers.deinit();
}
};
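A hedged sketch, for illustration only, of a `fetch` call under the new options, assuming `client` and `gpa` are in scope; the URL and header value are placeholders:

    var result = try client.fetch(gpa, .{
        .location = .{ .url = "http://example.com/" },
        .extra_headers = &.{
            .{ .name = "accept", .value = "text/plain" },
        },
    });
    defer result.deinit();
    std.debug.print("status: {d}\n", .{@intFromEnum(result.status)});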
@@ -1644,21 +1637,19 @@ pub const FetchResult = struct {
///
/// This function is threadsafe.
pub fn fetch(client: *Client, allocator: Allocator, options: FetchOptions) !FetchResult {
- const has_transfer_encoding = options.headers.contains("transfer-encoding");
- const has_content_length = options.headers.contains("content-length");
-
- if (has_content_length or has_transfer_encoding) return error.UnsupportedHeader;
-
const uri = switch (options.location) {
.url => |u| try Uri.parse(u),
.uri => |u| u,
};
var server_header_buffer: [16 * 1024]u8 = undefined;
- var req = try open(client, options.method, uri, options.headers, .{
+ var req = try open(client, options.method, uri, .{
.server_header_buffer = options.server_header_buffer orelse &server_header_buffer,
.redirect_behavior = options.redirect_behavior orelse
if (options.payload == .none) @enumFromInt(3) else .unhandled,
+ .headers = options.headers,
+ .extra_headers = options.extra_headers,
+ .privileged_headers = options.privileged_headers,
});
defer req.deinit();
@@ -1690,10 +1681,8 @@ pub fn fetch(client: *Client, allocator: Allocator, options: FetchOptions) !Fetc
try req.wait();
- var res = FetchResult{
+ var res: FetchResult = .{
.status = req.response.status,
- .headers = try req.response.headers.clone(allocator),
-
.allocator = allocator,
.options = options,
};
lib/std/http/Headers.zig
@@ -1,527 +0,0 @@
-const std = @import("../std.zig");
-
-const Allocator = std.mem.Allocator;
-
-const testing = std.testing;
-const ascii = std.ascii;
-const assert = std.debug.assert;
-
-pub const HeaderList = std.ArrayListUnmanaged(Field);
-pub const HeaderIndexList = std.ArrayListUnmanaged(usize);
-pub const HeaderIndex = std.HashMapUnmanaged([]const u8, HeaderIndexList, CaseInsensitiveStringContext, std.hash_map.default_max_load_percentage);
-
-pub const CaseInsensitiveStringContext = struct {
- pub fn hash(self: @This(), s: []const u8) u64 {
- _ = self;
- var buf: [64]u8 = undefined;
- var i: usize = 0;
-
- var h = std.hash.Wyhash.init(0);
- while (i + 64 < s.len) : (i += 64) {
- const ret = ascii.lowerString(buf[0..], s[i..][0..64]);
- h.update(ret);
- }
-
- const left = @min(64, s.len - i);
- const ret = ascii.lowerString(buf[0..], s[i..][0..left]);
- h.update(ret);
-
- return h.final();
- }
-
- pub fn eql(self: @This(), a: []const u8, b: []const u8) bool {
- _ = self;
- return ascii.eqlIgnoreCase(a, b);
- }
-};
-
-/// A single HTTP header field.
-pub const Field = struct {
- name: []const u8,
- value: []const u8,
-
- fn lessThan(ctx: void, a: Field, b: Field) bool {
- _ = ctx;
- if (a.name.ptr == b.name.ptr) return false;
-
- return ascii.lessThanIgnoreCase(a.name, b.name);
- }
-};
-
-/// A list of HTTP header fields.
-pub const Headers = struct {
- allocator: Allocator,
- list: HeaderList = .{},
- index: HeaderIndex = .{},
-
- /// When this is false, names and values will not be duplicated.
- /// Use with caution.
- owned: bool = true,
-
- /// Initialize an empty list of headers.
- pub fn init(allocator: Allocator) Headers {
- return .{ .allocator = allocator };
- }
-
- /// Initialize a pre-populated list of headers from a list of fields.
- pub fn initList(allocator: Allocator, list: []const Field) !Headers {
- var new = Headers.init(allocator);
-
- try new.list.ensureTotalCapacity(allocator, list.len);
- try new.index.ensureTotalCapacity(allocator, @intCast(list.len));
- for (list) |field| {
- try new.append(field.name, field.value);
- }
-
- return new;
- }
-
- /// Deallocate all memory associated with the headers.
- ///
- /// If the `owned` field is false, this will not free the names and values of the headers.
- pub fn deinit(headers: *Headers) void {
- headers.deallocateIndexListsAndFields();
- headers.index.deinit(headers.allocator);
- headers.list.deinit(headers.allocator);
-
- headers.* = undefined;
- }
-
- /// Appends a header to the list.
- ///
- /// If the `owned` field is true, both name and value will be copied.
- pub fn append(headers: *Headers, name: []const u8, value: []const u8) !void {
- try headers.appendOwned(.{ .unowned = name }, .{ .unowned = value });
- }
-
- pub const OwnedString = union(enum) {
- /// A string allocated by the `allocator` field.
- owned: []u8,
- /// A string to be copied by the `allocator` field.
- unowned: []const u8,
- };
-
- /// Appends a header to the list.
- ///
- /// If the `owned` field is true, `name` and `value` will be copied if unowned.
- pub fn appendOwned(headers: *Headers, name: OwnedString, value: OwnedString) !void {
- const n = headers.list.items.len;
- try headers.list.ensureUnusedCapacity(headers.allocator, 1);
-
- const owned_value = switch (value) {
- .owned => |owned| owned,
- .unowned => |unowned| if (headers.owned)
- try headers.allocator.dupe(u8, unowned)
- else
- unowned,
- };
- errdefer if (value == .unowned and headers.owned) headers.allocator.free(owned_value);
-
- var entry = Field{ .name = undefined, .value = owned_value };
-
- if (headers.index.getEntry(switch (name) {
- inline else => |string| string,
- })) |kv| {
- defer switch (name) {
- .owned => |owned| headers.allocator.free(owned),
- .unowned => {},
- };
-
- entry.name = kv.key_ptr.*;
- try kv.value_ptr.append(headers.allocator, n);
- } else {
- const owned_name = switch (name) {
- .owned => |owned| owned,
- .unowned => |unowned| if (headers.owned)
- try std.ascii.allocLowerString(headers.allocator, unowned)
- else
- unowned,
- };
- errdefer if (name == .unowned and headers.owned) headers.allocator.free(owned_name);
-
- entry.name = owned_name;
-
- var new_index = try HeaderIndexList.initCapacity(headers.allocator, 1);
- errdefer new_index.deinit(headers.allocator);
-
- new_index.appendAssumeCapacity(n);
- try headers.index.put(headers.allocator, owned_name, new_index);
- }
-
- headers.list.appendAssumeCapacity(entry);
- }
-
- /// Returns true if this list of headers contains the given name.
- pub fn contains(headers: Headers, name: []const u8) bool {
- return headers.index.contains(name);
- }
-
- /// Removes all headers with the given name.
- pub fn delete(headers: *Headers, name: []const u8) bool {
- if (headers.index.fetchRemove(name)) |kv| {
- var index = kv.value;
-
- // iterate backwards
- var i = index.items.len;
- while (i > 0) {
- i -= 1;
- const data_index = index.items[i];
- const removed = headers.list.orderedRemove(data_index);
-
- assert(ascii.eqlIgnoreCase(removed.name, name)); // ensure the index hasn't been corrupted
- if (headers.owned) headers.allocator.free(removed.value);
- }
-
- if (headers.owned) headers.allocator.free(kv.key);
- index.deinit(headers.allocator);
- headers.rebuildIndex();
-
- return true;
- } else {
- return false;
- }
- }
-
- /// Returns the index of the first occurrence of a header with the given name.
- pub fn firstIndexOf(headers: Headers, name: []const u8) ?usize {
- const index = headers.index.get(name) orelse return null;
-
- return index.items[0];
- }
-
- /// Returns a list of indices containing headers with the given name.
- pub fn getIndices(headers: Headers, name: []const u8) ?[]const usize {
- const index = headers.index.get(name) orelse return null;
-
- return index.items;
- }
-
- /// Returns the entry of the first occurrence of a header with the given name.
- pub fn getFirstEntry(headers: Headers, name: []const u8) ?Field {
- const first_index = headers.firstIndexOf(name) orelse return null;
-
- return headers.list.items[first_index];
- }
-
- /// Returns a slice containing each header with the given name.
- /// The caller owns the returned slice, but NOT the values in the slice.
- pub fn getEntries(headers: Headers, allocator: Allocator, name: []const u8) !?[]const Field {
- const indices = headers.getIndices(name) orelse return null;
-
- const buf = try allocator.alloc(Field, indices.len);
- for (indices, 0..) |idx, n| {
- buf[n] = headers.list.items[idx];
- }
-
- return buf;
- }
-
- /// Returns the value in the entry of the first occurrence of a header with the given name.
- pub fn getFirstValue(headers: Headers, name: []const u8) ?[]const u8 {
- const first_index = headers.firstIndexOf(name) orelse return null;
-
- return headers.list.items[first_index].value;
- }
-
- /// Returns a slice containing the value of each header with the given name.
- /// The caller owns the returned slice, but NOT the values in the slice.
- pub fn getValues(headers: Headers, allocator: Allocator, name: []const u8) !?[]const []const u8 {
- const indices = headers.getIndices(name) orelse return null;
-
- const buf = try allocator.alloc([]const u8, indices.len);
- for (indices, 0..) |idx, n| {
- buf[n] = headers.list.items[idx].value;
- }
-
- return buf;
- }
-
- fn rebuildIndex(headers: *Headers) void {
- // clear out the indexes
- var it = headers.index.iterator();
- while (it.next()) |entry| {
- entry.value_ptr.shrinkRetainingCapacity(0);
- }
-
- // fill up indexes again; we know capacity is fine from before
- for (headers.list.items, 0..) |entry, i| {
- headers.index.getEntry(entry.name).?.value_ptr.appendAssumeCapacity(i);
- }
- }
-
- /// Sorts the headers in lexicographical order.
- pub fn sort(headers: *Headers) void {
- std.mem.sort(Field, headers.list.items, {}, Field.lessThan);
- headers.rebuildIndex();
- }
-
- /// Writes the headers to the given stream.
- pub fn format(
- headers: Headers,
- comptime fmt: []const u8,
- options: std.fmt.FormatOptions,
- out_stream: anytype,
- ) !void {
- _ = fmt;
- _ = options;
-
- for (headers.list.items) |entry| {
- if (entry.value.len == 0) continue;
-
- try out_stream.writeAll(entry.name);
- try out_stream.writeAll(": ");
- try out_stream.writeAll(entry.value);
- try out_stream.writeAll("\r\n");
- }
- }
-
- /// Writes all of the headers with the given name to the given stream, separated by commas.
- ///
- /// This is useful for headers like `Set-Cookie` which can have multiple values. RFC 9110, Section 5.2
- pub fn formatCommaSeparated(
- headers: Headers,
- name: []const u8,
- out_stream: anytype,
- ) !void {
- const indices = headers.getIndices(name) orelse return;
-
- try out_stream.writeAll(name);
- try out_stream.writeAll(": ");
-
- for (indices, 0..) |idx, n| {
- if (n != 0) try out_stream.writeAll(", ");
- try out_stream.writeAll(headers.list.items[idx].value);
- }
-
- try out_stream.writeAll("\r\n");
- }
-
- /// Frees all `HeaderIndexList`s within `index`.
- /// Frees names and values of all fields if they are owned.
- fn deallocateIndexListsAndFields(headers: *Headers) void {
- var it = headers.index.iterator();
- while (it.next()) |entry| {
- entry.value_ptr.deinit(headers.allocator);
-
- if (headers.owned) headers.allocator.free(entry.key_ptr.*);
- }
-
- if (headers.owned) {
- for (headers.list.items) |entry| {
- headers.allocator.free(entry.value);
- }
- }
- }
-
- /// Clears and frees the underlying data structures.
- /// Frees names and values if they are owned.
- pub fn clearAndFree(headers: *Headers) void {
- headers.deallocateIndexListsAndFields();
- headers.index.clearAndFree(headers.allocator);
- headers.list.clearAndFree(headers.allocator);
- }
-
- /// Clears the underlying data structures while retaining their capacities.
- /// Frees names and values if they are owned.
- pub fn clearRetainingCapacity(headers: *Headers) void {
- headers.deallocateIndexListsAndFields();
- headers.index.clearRetainingCapacity();
- headers.list.clearRetainingCapacity();
- }
-
- /// Creates a copy of the headers using the provided allocator.
- pub fn clone(headers: Headers, allocator: Allocator) !Headers {
- var new = Headers.init(allocator);
-
- try new.list.ensureTotalCapacity(allocator, headers.list.capacity);
- try new.index.ensureTotalCapacity(allocator, headers.index.capacity());
- for (headers.list.items) |field| {
- try new.append(field.name, field.value);
- }
-
- return new;
- }
-};
-
-test "Headers.append" {
- var h = Headers{ .allocator = std.testing.allocator };
- defer h.deinit();
-
- try h.append("foo", "bar");
- try h.append("hello", "world");
-
- try testing.expect(h.contains("Foo"));
- try testing.expect(!h.contains("Bar"));
-}
-
-test "Headers.delete" {
- var h = Headers{ .allocator = std.testing.allocator };
- defer h.deinit();
-
- try h.append("foo", "bar");
- try h.append("hello", "world");
-
- try testing.expect(h.contains("Foo"));
-
- _ = h.delete("Foo");
-
- try testing.expect(!h.contains("foo"));
-}
-
-test "Headers consistency" {
- var h = Headers{ .allocator = std.testing.allocator };
- defer h.deinit();
-
- try h.append("foo", "bar");
- try h.append("hello", "world");
- _ = h.delete("Foo");
-
- try h.append("foo", "bar");
- try h.append("bar", "world");
- try h.append("foo", "baz");
- try h.append("baz", "hello");
-
- try testing.expectEqual(@as(?usize, 0), h.firstIndexOf("hello"));
- try testing.expectEqual(@as(?usize, 1), h.firstIndexOf("foo"));
- try testing.expectEqual(@as(?usize, 2), h.firstIndexOf("bar"));
- try testing.expectEqual(@as(?usize, 4), h.firstIndexOf("baz"));
- try testing.expectEqual(@as(?usize, null), h.firstIndexOf("pog"));
-
- try testing.expectEqualSlices(usize, &[_]usize{0}, h.getIndices("hello").?);
- try testing.expectEqualSlices(usize, &[_]usize{ 1, 3 }, h.getIndices("foo").?);
- try testing.expectEqualSlices(usize, &[_]usize{2}, h.getIndices("bar").?);
- try testing.expectEqualSlices(usize, &[_]usize{4}, h.getIndices("baz").?);
- try testing.expectEqual(@as(?[]const usize, null), h.getIndices("pog"));
-
- try testing.expectEqualStrings("world", h.getFirstEntry("hello").?.value);
- try testing.expectEqualStrings("bar", h.getFirstEntry("foo").?.value);
- try testing.expectEqualStrings("world", h.getFirstEntry("bar").?.value);
- try testing.expectEqualStrings("hello", h.getFirstEntry("baz").?.value);
-
- const hello_entries = (try h.getEntries(testing.allocator, "hello")).?;
- defer testing.allocator.free(hello_entries);
- try testing.expectEqualDeep(@as([]const Field, &[_]Field{
- .{ .name = "hello", .value = "world" },
- }), hello_entries);
-
- const foo_entries = (try h.getEntries(testing.allocator, "foo")).?;
- defer testing.allocator.free(foo_entries);
- try testing.expectEqualDeep(@as([]const Field, &[_]Field{
- .{ .name = "foo", .value = "bar" },
- .{ .name = "foo", .value = "baz" },
- }), foo_entries);
-
- const bar_entries = (try h.getEntries(testing.allocator, "bar")).?;
- defer testing.allocator.free(bar_entries);
- try testing.expectEqualDeep(@as([]const Field, &[_]Field{
- .{ .name = "bar", .value = "world" },
- }), bar_entries);
-
- const baz_entries = (try h.getEntries(testing.allocator, "baz")).?;
- defer testing.allocator.free(baz_entries);
- try testing.expectEqualDeep(@as([]const Field, &[_]Field{
- .{ .name = "baz", .value = "hello" },
- }), baz_entries);
-
- const pog_entries = (try h.getEntries(testing.allocator, "pog"));
- try testing.expectEqual(@as(?[]const Field, null), pog_entries);
-
- try testing.expectEqualStrings("world", h.getFirstValue("hello").?);
- try testing.expectEqualStrings("bar", h.getFirstValue("foo").?);
- try testing.expectEqualStrings("world", h.getFirstValue("bar").?);
- try testing.expectEqualStrings("hello", h.getFirstValue("baz").?);
- try testing.expectEqual(@as(?[]const u8, null), h.getFirstValue("pog"));
-
- const hello_values = (try h.getValues(testing.allocator, "hello")).?;
- defer testing.allocator.free(hello_values);
- try testing.expectEqualDeep(@as([]const []const u8, &[_][]const u8{"world"}), hello_values);
-
- const foo_values = (try h.getValues(testing.allocator, "foo")).?;
- defer testing.allocator.free(foo_values);
- try testing.expectEqualDeep(@as([]const []const u8, &[_][]const u8{ "bar", "baz" }), foo_values);
-
- const bar_values = (try h.getValues(testing.allocator, "bar")).?;
- defer testing.allocator.free(bar_values);
- try testing.expectEqualDeep(@as([]const []const u8, &[_][]const u8{"world"}), bar_values);
-
- const baz_values = (try h.getValues(testing.allocator, "baz")).?;
- defer testing.allocator.free(baz_values);
- try testing.expectEqualDeep(@as([]const []const u8, &[_][]const u8{"hello"}), baz_values);
-
- const pog_values = (try h.getValues(testing.allocator, "pog"));
- try testing.expectEqual(@as(?[]const []const u8, null), pog_values);
-
- h.sort();
-
- try testing.expectEqualSlices(usize, &[_]usize{0}, h.getIndices("bar").?);
- try testing.expectEqualSlices(usize, &[_]usize{1}, h.getIndices("baz").?);
- try testing.expectEqualSlices(usize, &[_]usize{ 2, 3 }, h.getIndices("foo").?);
- try testing.expectEqualSlices(usize, &[_]usize{4}, h.getIndices("hello").?);
-
- const formatted_values = try std.fmt.allocPrint(testing.allocator, "{}", .{h});
- defer testing.allocator.free(formatted_values);
-
- try testing.expectEqualStrings("bar: world\r\nbaz: hello\r\nfoo: bar\r\nfoo: baz\r\nhello: world\r\n", formatted_values);
-
- var buf: [128]u8 = undefined;
- var fbs = std.io.fixedBufferStream(&buf);
- const writer = fbs.writer();
-
- try h.formatCommaSeparated("foo", writer);
- try testing.expectEqualStrings("foo: bar, baz\r\n", fbs.getWritten());
-}
-
-test "Headers.clearRetainingCapacity and clearAndFree" {
- var h = Headers.init(std.testing.allocator);
- defer h.deinit();
-
- h.clearRetainingCapacity();
-
- try h.append("foo", "bar");
- try h.append("bar", "world");
- try h.append("foo", "baz");
- try h.append("baz", "hello");
- try testing.expectEqual(@as(usize, 4), h.list.items.len);
- try testing.expectEqual(@as(usize, 3), h.index.count());
- const list_capacity = h.list.capacity;
- const index_capacity = h.index.capacity();
-
- h.clearRetainingCapacity();
- try testing.expectEqual(@as(usize, 0), h.list.items.len);
- try testing.expectEqual(@as(usize, 0), h.index.count());
- try testing.expectEqual(list_capacity, h.list.capacity);
- try testing.expectEqual(index_capacity, h.index.capacity());
-
- try h.append("foo", "bar");
- try h.append("bar", "world");
- try h.append("foo", "baz");
- try h.append("baz", "hello");
- try testing.expectEqual(@as(usize, 4), h.list.items.len);
- try testing.expectEqual(@as(usize, 3), h.index.count());
- // Capacity should still be the same since we shouldn't have needed to grow
- // when adding back the same fields
- try testing.expectEqual(list_capacity, h.list.capacity);
- try testing.expectEqual(index_capacity, h.index.capacity());
-
- h.clearAndFree();
- try testing.expectEqual(@as(usize, 0), h.list.items.len);
- try testing.expectEqual(@as(usize, 0), h.index.count());
- try testing.expectEqual(@as(usize, 0), h.list.capacity);
- try testing.expectEqual(@as(usize, 0), h.index.capacity());
-}
-
-test "Headers.initList" {
- var h = try Headers.initList(std.testing.allocator, &.{
- .{ .name = "Accept-Encoding", .value = "gzip" },
- .{ .name = "Authorization", .value = "it's over 9000!" },
- });
- defer h.deinit();
-
- const encoding_values = (try h.getValues(testing.allocator, "Accept-Encoding")).?;
- defer testing.allocator.free(encoding_values);
- try testing.expectEqualDeep(@as([]const []const u8, &[_][]const u8{"gzip"}), encoding_values);
-
- const authorization_values = (try h.getValues(testing.allocator, "Authorization")).?;
- defer testing.allocator.free(authorization_values);
- try testing.expectEqualDeep(@as([]const []const u8, &[_][]const u8{"it's over 9000!"}), authorization_values);
-}
lib/std/http/Server.zig
@@ -162,11 +162,13 @@ pub const ResponseTransfer = union(enum) {
pub const Compression = union(enum) {
pub const DeflateDecompressor = std.compress.zlib.Decompressor(Response.TransferReader);
pub const GzipDecompressor = std.compress.gzip.Decompressor(Response.TransferReader);
- pub const ZstdDecompressor = std.compress.zstd.DecompressStream(Response.TransferReader, .{});
+ // https://github.com/ziglang/zig/issues/18937
+ //pub const ZstdDecompressor = std.compress.zstd.DecompressStream(Response.TransferReader, .{});
deflate: DeflateDecompressor,
gzip: GzipDecompressor,
- zstd: ZstdDecompressor,
+ // https://github.com/ziglang/zig/issues/18937
+ //zstd: ZstdDecompressor,
none: void,
};
@@ -179,7 +181,7 @@ pub const Request = struct {
HttpTransferEncodingUnsupported,
HttpConnectionHeaderUnsupported,
InvalidContentLength,
- CompressionNotSupported,
+ CompressionUnsupported,
};
pub fn parse(req: *Request, bytes: []const u8) ParseError!void {
@@ -189,13 +191,15 @@ pub const Request = struct {
if (first_line.len < 10)
return error.HttpHeadersInvalid;
- const method_end = mem.indexOfScalar(u8, first_line, ' ') orelse return error.HttpHeadersInvalid;
+ const method_end = mem.indexOfScalar(u8, first_line, ' ') orelse
+ return error.HttpHeadersInvalid;
if (method_end > 24) return error.HttpHeadersInvalid;
const method_str = first_line[0..method_end];
const method: http.Method = @enumFromInt(http.Method.parse(method_str));
- const version_start = mem.lastIndexOfScalar(u8, first_line, ' ') orelse return error.HttpHeadersInvalid;
+ const version_start = mem.lastIndexOfScalar(u8, first_line, ' ') orelse
+ return error.HttpHeadersInvalid;
if (version_start == method_end) return error.HttpHeadersInvalid;
const version_str = first_line[version_start + 1 ..];
@@ -223,11 +227,26 @@ pub const Request = struct {
const header_name = line_it.next() orelse return error.HttpHeadersInvalid;
const header_value = line_it.rest();
- try req.headers.append(header_name, header_value);
-
- if (std.ascii.eqlIgnoreCase(header_name, "content-length")) {
+ if (std.ascii.eqlIgnoreCase(header_name, "connection")) {
+ req.keep_alive = !std.ascii.eqlIgnoreCase(header_value, "close");
+ } else if (std.ascii.eqlIgnoreCase(header_name, "expect")) {
+ req.expect = header_value;
+ } else if (std.ascii.eqlIgnoreCase(header_name, "content-type")) {
+ req.content_type = header_value;
+ } else if (std.ascii.eqlIgnoreCase(header_name, "content-length")) {
if (req.content_length != null) return error.HttpHeadersInvalid;
- req.content_length = std.fmt.parseInt(u64, header_value, 10) catch return error.InvalidContentLength;
+ req.content_length = std.fmt.parseInt(u64, header_value, 10) catch
+ return error.InvalidContentLength;
+ } else if (std.ascii.eqlIgnoreCase(header_name, "content-encoding")) {
+ if (req.transfer_compression != .identity) return error.HttpHeadersInvalid;
+
+ const trimmed = mem.trim(u8, header_value, " ");
+
+ if (std.meta.stringToEnum(http.ContentEncoding, trimmed)) |ce| {
+ req.transfer_compression = ce;
+ } else {
+ return error.HttpTransferEncodingUnsupported;
+ }
} else if (std.ascii.eqlIgnoreCase(header_name, "transfer-encoding")) {
// Transfer-Encoding: second, first
// Transfer-Encoding: deflate, chunked
@@ -238,7 +257,8 @@ pub const Request = struct {
var next: ?[]const u8 = first;
if (std.meta.stringToEnum(http.TransferEncoding, trimmed_first)) |transfer| {
- if (req.transfer_encoding != .none) return error.HttpHeadersInvalid; // we already have a transfer encoding
+ if (req.transfer_encoding != .none)
+ return error.HttpHeadersInvalid; // we already have a transfer encoding
req.transfer_encoding = transfer;
next = iter.next();
@@ -248,7 +268,8 @@ pub const Request = struct {
const trimmed_second = mem.trim(u8, second, " ");
if (std.meta.stringToEnum(http.ContentEncoding, trimmed_second)) |transfer| {
- if (req.transfer_compression != .identity) return error.HttpHeadersInvalid; // double compression is not supported
+ if (req.transfer_compression != .identity)
+ return error.HttpHeadersInvalid; // double compression is not supported
req.transfer_compression = transfer;
} else {
return error.HttpTransferEncodingUnsupported;
@@ -256,45 +277,23 @@ pub const Request = struct {
}
if (iter.next()) |_| return error.HttpTransferEncodingUnsupported;
- } else if (std.ascii.eqlIgnoreCase(header_name, "content-encoding")) {
- if (req.transfer_compression != .identity) return error.HttpHeadersInvalid;
-
- const trimmed = mem.trim(u8, header_value, " ");
-
- if (std.meta.stringToEnum(http.ContentEncoding, trimmed)) |ce| {
- req.transfer_compression = ce;
- } else {
- return error.HttpTransferEncodingUnsupported;
- }
}
}
}
inline fn int64(array: *const [8]u8) u64 {
- return @as(u64, @bitCast(array.*));
+ return @bitCast(array.*);
}
- /// The HTTP request method.
method: http.Method,
-
- /// The HTTP request target.
target: []const u8,
-
- /// The HTTP version of this request.
version: http.Version,
-
- /// The length of the request body, if known.
+ expect: ?[]const u8 = null,
+ content_type: ?[]const u8 = null,
content_length: ?u64 = null,
-
- /// The transfer encoding of the request body, or .none if not present.
transfer_encoding: http.TransferEncoding = .none,
-
- /// The compression of the request body, or .identity (no compression) if not present.
transfer_compression: http.ContentEncoding = .identity,
-
- /// The list of HTTP request headers
- headers: http.Headers,
-
+ keep_alive: bool = false,
parser: proto.HeadersParser,
compression: Compression = .none,
};
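
With the header map removed from `Request`, common request metadata is read straight from the typed fields that `parse` fills in above. A minimal handler sketch (assumed usage, mirroring the standalone test changes further down; the function name is hypothetical):

    const std = @import("std");
    const Server = std.http.Server;

    fn handleRequest(res: *Server.Response) !void {
        // "expect", "content-type" and "connection" are now plain fields
        // populated by Request.parse, not lookups into an allocated header map.
        if (res.request.expect) |expect| {
            if (std.mem.eql(u8, expect, "100-continue")) {
                res.status = .@"continue";
                try res.send();
                res.status = .ok;
            }
        }
        if (res.request.content_type) |ct| std.log.info("content-type: {s}", .{ct});
        res.keep_alive = res.request.keep_alive;
    }
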
@@ -311,11 +310,8 @@ pub const Response = struct {
version: http.Version = .@"HTTP/1.1",
status: http.Status = .ok,
reason: ?[]const u8 = null,
-
- transfer_encoding: ResponseTransfer = .none,
-
- /// The allocator responsible for allocating memory for this response.
- allocator: Allocator,
+ transfer_encoding: ResponseTransfer,
+ keep_alive: bool,
/// The peer's address
address: net.Address,
@@ -323,8 +319,8 @@ pub const Response = struct {
/// The underlying connection for this response.
connection: Connection,
- /// The HTTP response headers
- headers: http.Headers,
+ /// Externally-owned; must outlive the Response.
+ extra_headers: []const http.Header = &.{},
/// The HTTP request that this response is responding to.
///
@@ -333,7 +329,7 @@ pub const Response = struct {
state: State = .first,
- const State = enum {
+ pub const State = enum {
first,
start,
waited,
@@ -344,14 +340,12 @@ pub const Response = struct {
/// Free all resources associated with this response.
pub fn deinit(res: *Response) void {
res.connection.close();
-
- res.headers.deinit();
- res.request.headers.deinit();
}
pub const ResetState = enum { reset, closing };
- /// Reset this response to its initial state. This must be called before handling a second request on the same connection.
+ /// Reset this response to its initial state. This must be called before
+ /// handling a second request on the same connection.
pub fn reset(res: *Response) ResetState {
if (res.state == .first) {
res.state = .start;
@@ -364,27 +358,11 @@ pub const Response = struct {
return .closing;
}
- // A connection is only keep-alive if the Connection header is present and it's value is not "close".
- // The server and client must both agree
+ // A connection is only keep-alive if the Connection header is present
+ // and its value is not "close". The server and client must both agree.
//
// send() defaults to using keep-alive if the client requests it.
- const res_connection = res.headers.getFirstValue("connection");
- const res_keepalive = res_connection != null and !std.ascii.eqlIgnoreCase("close", res_connection.?);
-
- const req_connection = res.request.headers.getFirstValue("connection");
- const req_keepalive = req_connection != null and !std.ascii.eqlIgnoreCase("close", req_connection.?);
- if (req_keepalive and (res_keepalive or res_connection == null)) {
- res.connection.closing = false;
- } else {
- res.connection.closing = true;
- }
-
- switch (res.request.compression) {
- .none => {},
- .deflate => {},
- .gzip => {},
- .zstd => |*zstd| zstd.deinit(),
- }
+ res.connection.closing = !res.keep_alive or !res.request.keep_alive;
res.state = .start;
res.version = .@"HTTP/1.1";
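
The comment above boils down to a logical AND of the two flags. A tiny self-contained sketch of that rule (illustrative only, not tied to any `Response` instance):

    const std = @import("std");

    test "connection closes unless both sides keep alive" {
        const cases = [_]struct { server: bool, client: bool, closing: bool }{
            .{ .server = true, .client = true, .closing = false },
            .{ .server = true, .client = false, .closing = true },
            .{ .server = false, .client = true, .closing = true },
            .{ .server = false, .client = false, .closing = true },
        };
        for (cases) |c| {
            // Same expression as `res.connection.closing` above.
            try std.testing.expectEqual(c.closing, !c.server or !c.client);
        }
    }
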
@@ -393,27 +371,22 @@ pub const Response = struct {
res.transfer_encoding = .none;
- res.headers.clearRetainingCapacity();
-
- res.request.headers.clearAndFree(); // FIXME: figure out why `clearRetainingCapacity` causes a leak in hash_map here
res.request.parser.reset();
- res.request = Request{
+ res.request = .{
.version = undefined,
.method = undefined,
.target = undefined,
- .headers = res.request.headers,
.parser = res.request.parser,
};
- if (res.connection.closing) {
- return .closing;
- } else {
- return .reset;
- }
+ return if (res.connection.closing) .closing else .reset;
}
- pub const SendError = Connection.WriteError || error{ UnsupportedTransferEncoding, InvalidContentLength };
+ pub const SendError = Connection.WriteError || error{
+ UnsupportedTransferEncoding,
+ InvalidContentLength,
+ };
/// Send the HTTP response headers to the client.
pub fn send(res: *Response) SendError!void {
@@ -439,44 +412,21 @@ pub const Response = struct {
if (res.status == .@"continue") {
res.state = .waited; // we still need to send the real response after this
} else {
- if (!res.headers.contains("connection")) {
- const req_connection = res.request.headers.getFirstValue("connection");
- const req_keepalive = req_connection != null and !std.ascii.eqlIgnoreCase("close", req_connection.?);
-
- if (req_keepalive) {
- try w.writeAll("Connection: keep-alive\r\n");
- } else {
- try w.writeAll("Connection: close\r\n");
- }
+ if (res.keep_alive and res.request.keep_alive) {
+ try w.writeAll("connection: keep-alive\r\n");
+ } else {
+ try w.writeAll("connection: close\r\n");
}
- const has_transfer_encoding = res.headers.contains("transfer-encoding");
- const has_content_length = res.headers.contains("content-length");
-
- if (!has_transfer_encoding and !has_content_length) {
- switch (res.transfer_encoding) {
- .chunked => try w.writeAll("Transfer-Encoding: chunked\r\n"),
- .content_length => |content_length| try w.print("Content-Length: {d}\r\n", .{content_length}),
- .none => {},
- }
- } else {
- if (has_content_length) {
- const content_length = std.fmt.parseInt(u64, res.headers.getFirstValue("content-length").?, 10) catch return error.InvalidContentLength;
-
- res.transfer_encoding = .{ .content_length = content_length };
- } else if (has_transfer_encoding) {
- const transfer_encoding = res.headers.getFirstValue("transfer-encoding").?;
- if (std.mem.eql(u8, transfer_encoding, "chunked")) {
- res.transfer_encoding = .chunked;
- } else {
- return error.UnsupportedTransferEncoding;
- }
- } else {
- res.transfer_encoding = .none;
- }
+ switch (res.transfer_encoding) {
+ .chunked => try w.writeAll("transfer-encoding: chunked\r\n"),
+ .content_length => |content_length| try w.print("content-length: {d}\r\n", .{content_length}),
+ .none => {},
}
- try w.print("{}", .{res.headers});
+ for (res.extra_headers) |header| {
+ try w.print("{s}: {s}\r\n", .{ header.name, header.value });
+ }
}
if (res.request.method == .HEAD) {
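
A minimal sketch of the new response-side flow (assumed usage, matching the updated test further down in this file's diff; `res` is presumed to be a `*Response` in scope):

    const body: []const u8 = "message from server!\n";
    res.status = .ok;
    res.transfer_encoding = .{ .content_length = body.len };
    // Anything without a dedicated field goes through `extra_headers`; the slice
    // is externally owned and must outlive the response.
    res.extra_headers = &.{
        .{ .name = "content-type", .value = "text/plain" },
    };
    res.keep_alive = false;
    try res.send();
    try res.writeAll(body);
    try res.finish();
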
@@ -511,7 +461,7 @@ pub const Response = struct {
pub const WaitError = Connection.ReadError ||
proto.HeadersParser.CheckCompleteHeadError || Request.ParseError ||
- error{ CompressionInitializationFailed, CompressionNotSupported };
+ error{CompressionUnsupported};
/// Wait for the client to send a complete request head.
///
@@ -545,37 +495,37 @@ pub const Response = struct {
if (res.request.parser.state.isContent()) break;
}
- res.request.headers = .{ .allocator = res.allocator, .owned = true };
try res.request.parse(res.request.parser.get());
- if (res.request.transfer_encoding != .none) {
- switch (res.request.transfer_encoding) {
- .none => unreachable,
- .chunked => {
- res.request.parser.next_chunk_length = 0;
- res.request.parser.state = .chunk_head_size;
- },
- }
- } else if (res.request.content_length) |cl| {
- res.request.parser.next_chunk_length = cl;
+ switch (res.request.transfer_encoding) {
+ .none => {
+ if (res.request.content_length) |len| {
+ res.request.parser.next_chunk_length = len;
- if (cl == 0) res.request.parser.state = .complete;
- } else {
- res.request.parser.state = .complete;
+ if (len == 0) res.request.parser.state = .complete;
+ } else {
+ res.request.parser.state = .complete;
+ }
+ },
+ .chunked => {
+ res.request.parser.next_chunk_length = 0;
+ res.request.parser.state = .chunk_head_size;
+ },
}
if (res.request.parser.state != .complete) {
switch (res.request.transfer_compression) {
.identity => res.request.compression = .none,
- .compress, .@"x-compress" => return error.CompressionNotSupported,
+ .compress, .@"x-compress" => return error.CompressionUnsupported,
.deflate => res.request.compression = .{
.deflate = std.compress.zlib.decompressor(res.transferReader()),
},
.gzip, .@"x-gzip" => res.request.compression = .{
.gzip = std.compress.gzip.decompressor(res.transferReader()),
},
- .zstd => res.request.compression = .{
- .zstd = std.compress.zstd.decompressStream(res.allocator, res.transferReader()),
+ .zstd => {
+ // https://github.com/ziglang/zig/issues/18937
+ return error.CompressionUnsupported;
},
}
}
@@ -599,7 +549,8 @@ pub const Response = struct {
const out_index = switch (res.request.compression) {
.deflate => |*deflate| deflate.read(buffer) catch return error.DecompressionFailure,
.gzip => |*gzip| gzip.read(buffer) catch return error.DecompressionFailure,
- .zstd => |*zstd| zstd.read(buffer) catch return error.DecompressionFailure,
+ // https://github.com/ziglang/zig/issues/18937
+ //.zstd => |*zstd| zstd.read(buffer) catch return error.DecompressionFailure,
else => try res.transferRead(buffer),
};
@@ -614,8 +565,6 @@ pub const Response = struct {
}
if (has_trail) {
- res.request.headers = http.Headers{ .allocator = res.allocator, .owned = false };
-
// The response headers before the trailers are already
// guaranteed to be valid, so they will always be parsed again
// and cannot return an error.
@@ -736,18 +685,17 @@ pub fn accept(server: *Server, options: AcceptOptions) AcceptError!Response {
const in = try server.socket.accept();
return .{
- .allocator = options.allocator,
+ .transfer_encoding = .none,
+ .keep_alive = true,
.address = in.address,
.connection = .{
.stream = in.stream,
.protocol = .plain,
},
- .headers = .{ .allocator = options.allocator },
.request = .{
.version = undefined,
.method = undefined,
.target = undefined,
- .headers = .{ .allocator = options.allocator, .owned = false },
.parser = proto.HeadersParser.init(options.client_header_buffer),
},
};
@@ -793,8 +741,10 @@ test "HTTP server handles a chunked transfer coding request" {
const server_body: []const u8 = "message from server!\n";
res.transfer_encoding = .{ .content_length = server_body.len };
- try res.headers.append("content-type", "text/plain");
- try res.headers.append("connection", "close");
+ res.extra_headers = &.{
+ .{ .name = "content-type", .value = "text/plain" },
+ };
+ res.keep_alive = false;
try res.send();
var buf: [128]u8 = undefined;
lib/std/http.zig
@@ -3,10 +3,6 @@ const std = @import("std.zig");
pub const Client = @import("http/Client.zig");
pub const Server = @import("http/Server.zig");
pub const protocol = @import("http/protocol.zig");
-const headers = @import("http/Headers.zig");
-
-pub const Headers = headers.Headers;
-pub const Field = headers.Field;
pub const Version = enum {
@"HTTP/1.0",
@@ -18,7 +14,7 @@ pub const Version = enum {
/// https://datatracker.ietf.org/doc/html/rfc7231#section-4 Initial definition
///
/// https://datatracker.ietf.org/doc/html/rfc5789#section-2 PATCH
-pub const Method = enum(u64) { // TODO: should be u192 or u256, but neither is supported by the C backend, and therefore cannot pass CI
+pub const Method = enum(u64) {
GET = parse("GET"),
HEAD = parse("HEAD"),
POST = parse("POST"),
@@ -309,6 +305,11 @@ pub const Connection = enum {
close,
};
+pub const Header = struct {
+ name: []const u8,
+ value: []const u8,
+};
+
test {
_ = Client;
_ = Method;
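
`Header` is the plain name/value pair that both the client and server now accept as `extra_headers`. A hypothetical client call site (the function name and parameters are assumptions; the `open` options mirror the call sites changed below):

    const std = @import("std");

    fn fetchRefs(client: *std.http.Client, uri: std.Uri, buf: []u8) !void {
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = buf,
            .extra_headers = &.{
                .{ .name = "Git-Protocol", .value = "version=2" },
            },
        });
        defer req.deinit();
        try req.send(.{});
        try req.wait();
    }
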
src/Package/Fetch/git.zig
@@ -530,13 +530,12 @@ pub const Session = struct {
info_refs_uri.query = "service=git-upload-pack";
info_refs_uri.fragment = null;
- var headers = std.http.Headers.init(allocator);
- defer headers.deinit();
- try headers.append("Git-Protocol", "version=2");
-
- var request = try session.transport.open(.GET, info_refs_uri, headers, .{
- .max_redirects = 3,
+ var request = try session.transport.open(.GET, info_refs_uri, .{
+ .redirect_behavior = @enumFromInt(3),
.server_header_buffer = http_headers_buffer,
+ .extra_headers = &.{
+ .{ .name = "Git-Protocol", .value = "version=2" },
+ },
});
errdefer request.deinit();
try request.send(.{});
@@ -544,7 +543,12 @@ pub const Session = struct {
try request.wait();
if (request.response.status != .ok) return error.ProtocolError;
- if (request.redirects_left < 3) {
+ // This is almost certainly dead code: for a redirect to occur, the status code
+ // would need to be in the 3xx range, and the `.ok` check on the line above
+ // already rules that out.
+ var runtime_false = false;
+ _ = &runtime_false;
+ if (runtime_false) {
if (!mem.endsWith(u8, request.uri.path, "/info/refs")) return error.UnparseableRedirect;
var new_uri = request.uri;
new_uri.path = new_uri.path[0 .. new_uri.path.len - "/info/refs".len];
@@ -634,11 +638,6 @@ pub const Session = struct {
upload_pack_uri.query = null;
upload_pack_uri.fragment = null;
- var headers = std.http.Headers.init(allocator);
- defer headers.deinit();
- try headers.append("Content-Type", "application/x-git-upload-pack-request");
- try headers.append("Git-Protocol", "version=2");
-
var body = std.ArrayListUnmanaged(u8){};
defer body.deinit(allocator);
const body_writer = body.writer(allocator);
@@ -660,9 +659,13 @@ pub const Session = struct {
}
try Packet.write(.flush, body_writer);
- var request = try session.transport.open(.POST, upload_pack_uri, headers, .{
- .handle_redirects = false,
+ var request = try session.transport.open(.POST, upload_pack_uri, .{
+ .redirect_behavior = .unhandled,
.server_header_buffer = options.server_header_buffer,
+ .extra_headers = &.{
+ .{ .name = "Content-Type", .value = "application/x-git-upload-pack-request" },
+ .{ .name = "Git-Protocol", .value = "version=2" },
+ },
});
errdefer request.deinit();
request.transfer_encoding = .{ .content_length = body.items.len };
@@ -738,11 +741,6 @@ pub const Session = struct {
upload_pack_uri.query = null;
upload_pack_uri.fragment = null;
- var headers = std.http.Headers.init(allocator);
- defer headers.deinit();
- try headers.append("Content-Type", "application/x-git-upload-pack-request");
- try headers.append("Git-Protocol", "version=2");
-
var body = std.ArrayListUnmanaged(u8){};
defer body.deinit(allocator);
const body_writer = body.writer(allocator);
@@ -766,9 +764,13 @@ pub const Session = struct {
try Packet.write(.{ .data = "done\n" }, body_writer);
try Packet.write(.flush, body_writer);
- var request = try session.transport.open(.POST, upload_pack_uri, headers, .{
- .handle_redirects = false,
+ var request = try session.transport.open(.POST, upload_pack_uri, .{
+ .redirect_behavior = .not_allowed,
.server_header_buffer = http_headers_buffer,
+ .extra_headers = &.{
+ .{ .name = "Content-Type", .value = "application/x-git-upload-pack-request" },
+ .{ .name = "Git-Protocol", .value = "version=2" },
+ },
});
errdefer request.deinit();
request.transfer_encoding = .{ .content_length = body.items.len };
src/Package/Fetch.zig
@@ -898,10 +898,7 @@ fn initResource(f: *Fetch, uri: std.Uri, server_header_buffer: []u8) RunError!Re
if (ascii.eqlIgnoreCase(uri.scheme, "http") or
ascii.eqlIgnoreCase(uri.scheme, "https"))
{
- var h: std.http.Headers = .{ .allocator = gpa };
- defer h.deinit();
-
- var req = http_client.open(.GET, uri, h, .{
+ var req = http_client.open(.GET, uri, .{
.server_header_buffer = server_header_buffer,
}) catch |err| {
return f.fail(f.location_tok, try eb.printString(
@@ -1043,7 +1040,7 @@ fn unpackResource(
.http_request => |req| ft: {
// Content-Type takes first precedence.
- const content_type = req.response.headers.getFirstValue("Content-Type") orelse
+ const content_type = req.response.content_type orelse
return f.fail(f.location_tok, try eb.addString("missing 'Content-Type' header"));
// Extract the MIME type, ignoring charset and boundary directives
@@ -1076,7 +1073,7 @@ fn unpackResource(
}
// Next, the filename from 'content-disposition: attachment' takes precedence.
- if (req.response.headers.getFirstValue("Content-Disposition")) |cd_header| {
+ if (req.response.content_disposition) |cd_header| {
break :ft FileType.fromContentDisposition(cd_header) orelse {
return f.fail(f.location_tok, try eb.printString(
"unsupported Content-Disposition header value: '{s}' for Content-Type=application/octet-stream",
src/main.zig
@@ -5486,7 +5486,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
job_queue.read_only = true;
cleanup_build_dir = job_queue.global_cache.handle;
} else {
- try http_client.loadDefaultProxies();
+ try http_client.initDefaultProxies(arena);
}
try job_queue.all_fetches.ensureUnusedCapacity(gpa, 1);
@@ -7442,7 +7442,7 @@ fn cmdFetch(
var http_client: std.http.Client = .{ .allocator = gpa };
defer http_client.deinit();
- try http_client.loadDefaultProxies();
+ try http_client.initDefaultProxies(arena);
var progress: std.Progress = .{ .dont_print_on_dumb = true };
const root_prog_node = progress.start("Fetch", 0);
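
`initDefaultProxies` now takes an allocator, apparently for the proxy objects it creates; an arena that outlives the client works well, as in the standalone test below. A minimal sketch, assuming `gpa` is the application's general-purpose allocator:

    var arena_instance = std.heap.ArenaAllocator.init(gpa);
    defer arena_instance.deinit();

    var client: std.http.Client = .{ .allocator = gpa };
    defer client.deinit();

    // The arena owns the proxy memory, so it must stay alive for as long as
    // the client may use its proxies.
    try client.initDefaultProxies(arena_instance.allocator());
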
test/standalone/http.zig
@@ -26,8 +26,8 @@ fn handleRequest(res: *Server.Response) !void {
log.info("{} {s} {s}", .{ res.request.method, @tagName(res.request.version), res.request.target });
- if (res.request.headers.contains("expect")) {
- if (mem.eql(u8, res.request.headers.getFirstValue("expect").?, "100-continue")) {
+ if (res.request.expect) |expect| {
+ if (mem.eql(u8, expect, "100-continue")) {
res.status = .@"continue";
try res.send();
res.status = .ok;
@@ -41,8 +41,8 @@ fn handleRequest(res: *Server.Response) !void {
const body = try res.reader().readAllAlloc(salloc, 8192);
defer salloc.free(body);
- if (res.request.headers.contains("connection")) {
- try res.headers.append("connection", "keep-alive");
+ if (res.request.keep_alive) {
+ res.keep_alive = true;
}
if (mem.startsWith(u8, res.request.target, "/get")) {
@@ -52,7 +52,9 @@ fn handleRequest(res: *Server.Response) !void {
res.transfer_encoding = .{ .content_length = 14 };
}
- try res.headers.append("content-type", "text/plain");
+ res.extra_headers = &.{
+ .{ .name = "content-type", .value = "text/plain" },
+ };
try res.send();
if (res.request.method != .HEAD) {
@@ -82,14 +84,14 @@ fn handleRequest(res: *Server.Response) !void {
try res.finish();
} else if (mem.startsWith(u8, res.request.target, "/echo-content")) {
try testing.expectEqualStrings("Hello, World!\n", body);
- try testing.expectEqualStrings("text/plain", res.request.headers.getFirstValue("content-type").?);
-
- if (res.request.headers.contains("transfer-encoding")) {
- try testing.expectEqualStrings("chunked", res.request.headers.getFirstValue("transfer-encoding").?);
- res.transfer_encoding = .chunked;
- } else {
- res.transfer_encoding = .{ .content_length = 14 };
- try testing.expectEqualStrings("14", res.request.headers.getFirstValue("content-length").?);
+ try testing.expectEqualStrings("text/plain", res.request.content_type.?);
+
+ switch (res.request.transfer_encoding) {
+ .chunked => res.transfer_encoding = .chunked,
+ .none => {
+ res.transfer_encoding = .{ .content_length = 14 };
+ try testing.expectEqual(14, res.request.content_length.?);
+ },
}
try res.send();
@@ -108,7 +110,9 @@ fn handleRequest(res: *Server.Response) !void {
res.transfer_encoding = .chunked;
res.status = .found;
- try res.headers.append("location", "../../get");
+ res.extra_headers = &.{
+ .{ .name = "location", .value = "../../get" },
+ };
try res.send();
try res.writeAll("Hello, ");
@@ -118,7 +122,9 @@ fn handleRequest(res: *Server.Response) !void {
res.transfer_encoding = .chunked;
res.status = .found;
- try res.headers.append("location", "/redirect/1");
+ res.extra_headers = &.{
+ .{ .name = "location", .value = "/redirect/1" },
+ };
try res.send();
try res.writeAll("Hello, ");
@@ -131,7 +137,9 @@ fn handleRequest(res: *Server.Response) !void {
defer salloc.free(location);
res.status = .found;
- try res.headers.append("location", location);
+ res.extra_headers = &.{
+ .{ .name = "location", .value = location },
+ };
try res.send();
try res.writeAll("Hello, ");
@@ -141,7 +149,9 @@ fn handleRequest(res: *Server.Response) !void {
res.transfer_encoding = .chunked;
res.status = .found;
- try res.headers.append("location", "/redirect/3");
+ res.extra_headers = &.{
+ .{ .name = "location", .value = "/redirect/3" },
+ };
try res.send();
try res.writeAll("Hello, ");
@@ -153,7 +163,9 @@ fn handleRequest(res: *Server.Response) !void {
defer salloc.free(location);
res.status = .found;
- try res.headers.append("location", location);
+ res.extra_headers = &.{
+ .{ .name = "location", .value = location },
+ };
try res.send();
try res.finish();
} else {
@@ -234,19 +246,20 @@ pub fn main() !void {
errdefer client.deinit();
// defer client.deinit(); handled below
- try client.loadDefaultProxies();
+ var arena_instance = std.heap.ArenaAllocator.init(calloc);
+ defer arena_instance.deinit();
+ const arena = arena_instance.allocator();
- { // read content-length response
- var h = http.Headers{ .allocator = calloc };
- defer h.deinit();
+ try client.initDefaultProxies(arena);
+ { // read content-length response
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/get", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var server_header_buffer: [1024]u8 = undefined;
- var req = try client.open(.GET, uri, h, .{
+ var req = try client.open(.GET, uri, .{
.server_header_buffer = &server_header_buffer,
});
defer req.deinit();
@@ -258,23 +271,20 @@ pub fn main() !void {
defer calloc.free(body);
try testing.expectEqualStrings("Hello, World!\n", body);
- try testing.expectEqualStrings("text/plain", req.response.headers.getFirstValue("content-type").?);
+ try testing.expectEqualStrings("text/plain", req.response.content_type.?);
}
// connection has been kept alive
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // read large content-length response
- var h = http.Headers{ .allocator = calloc };
- defer h.deinit();
-
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/large", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var server_header_buffer: [1024]u8 = undefined;
- var req = try client.open(.GET, uri, h, .{
+ var req = try client.open(.GET, uri, .{
.server_header_buffer = &server_header_buffer,
});
defer req.deinit();
@@ -292,16 +302,13 @@ pub fn main() !void {
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // send HEAD request without reading the content-length body
- var h = http.Headers{ .allocator = calloc };
- defer h.deinit();
-
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/get", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var server_header_buffer: [1024]u8 = undefined;
- var req = try client.open(.HEAD, uri, h, .{
+ var req = try client.open(.HEAD, uri, .{
.server_header_buffer = &server_header_buffer,
});
defer req.deinit();
@@ -313,24 +320,21 @@ pub fn main() !void {
defer calloc.free(body);
try testing.expectEqualStrings("", body);
- try testing.expectEqualStrings("text/plain", req.response.headers.getFirstValue("content-type").?);
- try testing.expectEqualStrings("14", req.response.headers.getFirstValue("content-length").?);
+ try testing.expectEqualStrings("text/plain", req.response.content_type.?);
+ try testing.expectEqual(14, req.response.content_length.?);
}
// connection has been kept alive
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // read chunked response
- var h = http.Headers{ .allocator = calloc };
- defer h.deinit();
-
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/get?chunked", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var server_header_buffer: [1024]u8 = undefined;
- var req = try client.open(.GET, uri, h, .{
+ var req = try client.open(.GET, uri, .{
.server_header_buffer = &server_header_buffer,
});
defer req.deinit();
@@ -342,23 +346,20 @@ pub fn main() !void {
defer calloc.free(body);
try testing.expectEqualStrings("Hello, World!\n", body);
- try testing.expectEqualStrings("text/plain", req.response.headers.getFirstValue("content-type").?);
+ try testing.expectEqualStrings("text/plain", req.response.content_type.?);
}
// connection has been kept alive
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // send HEAD request without reading the chunked body
- var h = http.Headers{ .allocator = calloc };
- defer h.deinit();
-
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/get?chunked", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var server_header_buffer: [1024]u8 = undefined;
- var req = try client.open(.HEAD, uri, h, .{
+ var req = try client.open(.HEAD, uri, .{
.server_header_buffer = &server_header_buffer,
});
defer req.deinit();
@@ -370,24 +371,21 @@ pub fn main() !void {
defer calloc.free(body);
try testing.expectEqualStrings("", body);
- try testing.expectEqualStrings("text/plain", req.response.headers.getFirstValue("content-type").?);
- try testing.expectEqualStrings("chunked", req.response.headers.getFirstValue("transfer-encoding").?);
+ try testing.expectEqualStrings("text/plain", req.response.content_type.?);
+ try testing.expect(req.response.transfer_encoding == .chunked);
}
// connection has been kept alive
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // check trailing headers
- var h = http.Headers{ .allocator = calloc };
- defer h.deinit();
-
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/trailer", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var server_header_buffer: [1024]u8 = undefined;
- var req = try client.open(.GET, uri, h, .{
+ var req = try client.open(.GET, uri, .{
.server_header_buffer = &server_header_buffer,
});
defer req.deinit();
@@ -399,26 +397,25 @@ pub fn main() !void {
defer calloc.free(body);
try testing.expectEqualStrings("Hello, World!\n", body);
- try testing.expectEqualStrings("aaaa", req.response.headers.getFirstValue("x-checksum").?);
+ @panic("TODO implement inspecting custom headers in responses");
+ //try testing.expectEqualStrings("aaaa", req.response.headers.getFirstValue("x-checksum").?);
}
// connection has been kept alive
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // send content-length request
- var h = http.Headers{ .allocator = calloc };
- defer h.deinit();
-
- try h.append("content-type", "text/plain");
-
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/echo-content", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var server_header_buffer: [1024]u8 = undefined;
- var req = try client.open(.POST, uri, h, .{
+ var req = try client.open(.POST, uri, .{
.server_header_buffer = &server_header_buffer,
+ .extra_headers = &.{
+ .{ .name = "content-type", .value = "text/plain" },
+ },
});
defer req.deinit();
@@ -441,19 +438,15 @@ pub fn main() !void {
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // read content-length response with connection close
- var h = http.Headers{ .allocator = calloc };
- defer h.deinit();
-
- try h.append("connection", "close");
-
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/get", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var server_header_buffer: [1024]u8 = undefined;
- var req = try client.open(.GET, uri, h, .{
+ var req = try client.open(.GET, uri, .{
.server_header_buffer = &server_header_buffer,
+ .keep_alive = false,
});
defer req.deinit();
@@ -464,26 +457,24 @@ pub fn main() !void {
defer calloc.free(body);
try testing.expectEqualStrings("Hello, World!\n", body);
- try testing.expectEqualStrings("text/plain", req.response.headers.getFirstValue("content-type").?);
+ try testing.expectEqualStrings("text/plain", req.response.content_type.?);
}
// connection has been closed
try testing.expect(client.connection_pool.free_len == 0);
{ // send chunked request
- var h = http.Headers{ .allocator = calloc };
- defer h.deinit();
-
- try h.append("content-type", "text/plain");
-
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/echo-content", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var server_header_buffer: [1024]u8 = undefined;
- var req = try client.open(.POST, uri, h, .{
+ var req = try client.open(.POST, uri, .{
.server_header_buffer = &server_header_buffer,
+ .extra_headers = &.{
+ .{ .name = "content-type", .value = "text/plain" },
+ },
});
defer req.deinit();
@@ -506,16 +497,13 @@ pub fn main() !void {
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // relative redirect
- var h = http.Headers{ .allocator = calloc };
- defer h.deinit();
-
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/redirect/1", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var server_header_buffer: [1024]u8 = undefined;
- var req = try client.open(.GET, uri, h, .{
+ var req = try client.open(.GET, uri, .{
.server_header_buffer = &server_header_buffer,
});
defer req.deinit();
@@ -533,16 +521,13 @@ pub fn main() !void {
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // redirect from root
- var h = http.Headers{ .allocator = calloc };
- defer h.deinit();
-
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/redirect/2", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var server_header_buffer: [1024]u8 = undefined;
- var req = try client.open(.GET, uri, h, .{
+ var req = try client.open(.GET, uri, .{
.server_header_buffer = &server_header_buffer,
});
defer req.deinit();
@@ -560,16 +545,13 @@ pub fn main() !void {
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // absolute redirect
- var h = http.Headers{ .allocator = calloc };
- defer h.deinit();
-
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/redirect/3", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var server_header_buffer: [1024]u8 = undefined;
- var req = try client.open(.GET, uri, h, .{
+ var req = try client.open(.GET, uri, .{
.server_header_buffer = &server_header_buffer,
});
defer req.deinit();
@@ -587,16 +569,13 @@ pub fn main() !void {
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // too many redirects
- var h = http.Headers{ .allocator = calloc };
- defer h.deinit();
-
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/redirect/4", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var server_header_buffer: [1024]u8 = undefined;
- var req = try client.open(.GET, uri, h, .{
+ var req = try client.open(.GET, uri, .{
.server_header_buffer = &server_header_buffer,
});
defer req.deinit();
@@ -612,16 +591,13 @@ pub fn main() !void {
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // check client without segfault by connection error after redirection
- var h = http.Headers{ .allocator = calloc };
- defer h.deinit();
-
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/redirect/invalid", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var server_header_buffer: [1024]u8 = undefined;
- var req = try client.open(.GET, uri, h, .{
+ var req = try client.open(.GET, uri, .{
.server_header_buffer = &server_header_buffer,
});
defer req.deinit();
@@ -639,10 +615,6 @@ pub fn main() !void {
try testing.expect(client.http_proxy != null or client.connection_pool.free_len == 1);
{ // Client.fetch()
- var h = http.Headers{ .allocator = calloc };
- defer h.deinit();
-
- try h.append("content-type", "text/plain");
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/echo-content#fetch", .{port});
defer calloc.free(location);
@@ -651,8 +623,10 @@ pub fn main() !void {
var res = try client.fetch(calloc, .{
.location = .{ .url = location },
.method = .POST,
- .headers = h,
.payload = .{ .string = "Hello, World!\n" },
+ .extra_headers = &.{
+ .{ .name = "content-type", .value = "text/plain" },
+ },
});
defer res.deinit();
@@ -660,20 +634,18 @@ pub fn main() !void {
}
{ // expect: 100-continue
- var h = http.Headers{ .allocator = calloc };
- defer h.deinit();
-
- try h.append("expect", "100-continue");
- try h.append("content-type", "text/plain");
-
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/echo-content#expect-100", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var server_header_buffer: [1024]u8 = undefined;
- var req = try client.open(.POST, uri, h, .{
+ var req = try client.open(.POST, uri, .{
.server_header_buffer = &server_header_buffer,
+ .extra_headers = &.{
+ .{ .name = "expect", .value = "100-continue" },
+ .{ .name = "content-type", .value = "text/plain" },
+ },
});
defer req.deinit();
@@ -694,20 +666,18 @@ pub fn main() !void {
}
{ // expect: garbage
- var h = http.Headers{ .allocator = calloc };
- defer h.deinit();
-
- try h.append("content-type", "text/plain");
- try h.append("expect", "garbage");
-
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/echo-content#expect-garbage", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);
log.info("{s}", .{location});
var server_header_buffer: [1024]u8 = undefined;
- var req = try client.open(.POST, uri, h, .{
+ var req = try client.open(.POST, uri, .{
.server_header_buffer = &server_header_buffer,
+ .extra_headers = &.{
+ .{ .name = "content-type", .value = "text/plain" },
+ .{ .name = "expect", .value = "garbage" },
+ },
});
defer req.deinit();
@@ -734,7 +704,7 @@ pub fn main() !void {
for (0..total_connections) |i| {
const headers_buf = try calloc.alloc(u8, 1024);
try header_bufs.append(headers_buf);
- var req = try client.open(.GET, uri, .{ .allocator = calloc }, .{
+ var req = try client.open(.GET, uri, .{
.server_header_buffer = headers_buf,
});
req.response.parser.state = .complete;