const builtin = @import("builtin");
const std = @import("std.zig");
const assert = std.debug.assert;
const Writer = std.Io.Writer;
const File = std.fs.File;

pub const Client = @import("http/Client.zig");
pub const Server = @import("http/Server.zig");
pub const HeadParser = @import("http/HeadParser.zig");
pub const ChunkParser = @import("http/ChunkParser.zig");
pub const HeaderIterator = @import("http/HeaderIterator.zig");

pub const Version = enum {
    @"HTTP/1.0",
    @"HTTP/1.1",
};
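
// A minimal usage sketch: the tag names of `Version` are spelled exactly as
// they appear on the wire in the HTTP start line.
test {
    try std.testing.expectEqualStrings("HTTP/1.1", @tagName(Version.@"HTTP/1.1"));
}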

/// https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods
///
/// https://datatracker.ietf.org/doc/html/rfc7231#section-4 Initial definition
///
/// https://datatracker.ietf.org/doc/html/rfc5789#section-2 PATCH
pub const Method = enum {
    GET,
    HEAD,
    POST,
    PUT,
    DELETE,
    CONNECT,
    OPTIONS,
    TRACE,
    PATCH,

    /// Returns true if a request of this method is allowed to have a body.
    /// Actual behavior from servers may vary and should still be checked.
    pub fn requestHasBody(m: Method) bool {
        return switch (m) {
            .POST, .PUT, .PATCH => true,
            .GET, .HEAD, .DELETE, .CONNECT, .OPTIONS, .TRACE => false,
        };
    }

    /// Returns true if a response to this method is allowed to have a body.
    /// Actual behavior from clients may vary and should still be checked.
    pub fn responseHasBody(m: Method) bool {
        return switch (m) {
            .GET, .POST, .PUT, .DELETE, .CONNECT, .OPTIONS, .PATCH => true,
            .HEAD, .TRACE => false,
        };
    }

    /// An HTTP method is safe if it doesn't alter the state of the server.
    ///
    /// https://developer.mozilla.org/en-US/docs/Glossary/Safe/HTTP
    ///
    /// https://datatracker.ietf.org/doc/html/rfc7231#section-4.2.1
    pub fn safe(m: Method) bool {
        return switch (m) {
            .GET, .HEAD, .OPTIONS, .TRACE => true,
            .POST, .PUT, .DELETE, .CONNECT, .PATCH => false,
        };
    }

    /// An HTTP method is idempotent if an identical request can be made once
    /// or several times in a row with the same effect while leaving the server
    /// in the same state.
    ///
    /// https://developer.mozilla.org/en-US/docs/Glossary/Idempotent
    ///
    /// https://datatracker.ietf.org/doc/html/rfc7231#section-4.2.2
    pub fn idempotent(m: Method) bool {
        return switch (m) {
            .GET, .HEAD, .PUT, .DELETE, .OPTIONS, .TRACE => true,
            .CONNECT, .POST, .PATCH => false,
        };
    }

    /// A cacheable response can be stored to be retrieved and used later,
    /// saving a new request to the server.
    ///
    /// https://developer.mozilla.org/en-US/docs/Glossary/cacheable
    ///
    /// https://datatracker.ietf.org/doc/html/rfc7231#section-4.2.3
    pub fn cacheable(m: Method) bool {
        return switch (m) {
            .GET, .HEAD => true,
            .POST, .PUT, .DELETE, .CONNECT, .OPTIONS, .TRACE, .PATCH => false,
        };
    }
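
    // A small usage sketch of the classification helpers above.
    test {
        try std.testing.expect(Method.POST.requestHasBody());
        try std.testing.expect(!Method.HEAD.responseHasBody());
        try std.testing.expect(Method.GET.safe());
        try std.testing.expect(Method.PUT.idempotent());
        try std.testing.expect(!Method.POST.cacheable());
    }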
};

/// https://developer.mozilla.org/en-US/docs/Web/HTTP/Status
pub const Status = enum(u10) {
    @"continue" = 100, // RFC7231, Section 6.2.1
    switching_protocols = 101, // RFC7231, Section 6.2.2
    processing = 102, // RFC2518
    early_hints = 103, // RFC8297

    ok = 200, // RFC7231, Section 6.3.1
    created = 201, // RFC7231, Section 6.3.2
    accepted = 202, // RFC7231, Section 6.3.3
    non_authoritative_info = 203, // RFC7231, Section 6.3.4
    no_content = 204, // RFC7231, Section 6.3.5
    reset_content = 205, // RFC7231, Section 6.3.6
    partial_content = 206, // RFC7233, Section 4.1
    multi_status = 207, // RFC4918
    already_reported = 208, // RFC5842
    im_used = 226, // RFC3229

    multiple_choice = 300, // RFC7231, Section 6.4.1
    moved_permanently = 301, // RFC7231, Section 6.4.2
    found = 302, // RFC7231, Section 6.4.3
    see_other = 303, // RFC7231, Section 6.4.4
    not_modified = 304, // RFC7232, Section 4.1
    use_proxy = 305, // RFC7231, Section 6.4.5
    temporary_redirect = 307, // RFC7231, Section 6.4.7
    permanent_redirect = 308, // RFC7538

    bad_request = 400, // RFC7231, Section 6.5.1
    unauthorized = 401, // RFC7235, Section 3.1
    payment_required = 402, // RFC7231, Section 6.5.2
    forbidden = 403, // RFC7231, Section 6.5.3
    not_found = 404, // RFC7231, Section 6.5.4
    method_not_allowed = 405, // RFC7231, Section 6.5.5
    not_acceptable = 406, // RFC7231, Section 6.5.6
    proxy_auth_required = 407, // RFC7235, Section 3.2
    request_timeout = 408, // RFC7231, Section 6.5.7
    conflict = 409, // RFC7231, Section 6.5.8
    gone = 410, // RFC7231, Section 6.5.9
    length_required = 411, // RFC7231, Section 6.5.10
    precondition_failed = 412, // RFC7232, Section 4.2; RFC8144, Section 3.2
    payload_too_large = 413, // RFC7231, Section 6.5.11
    uri_too_long = 414, // RFC7231, Section 6.5.12
    unsupported_media_type = 415, // RFC7231, Section 6.5.13; RFC7694, Section 3
    range_not_satisfiable = 416, // RFC7233, Section 4.4
    expectation_failed = 417, // RFC7231, Section 6.5.14
    teapot = 418, // RFC7168, Section 2.3.3
    misdirected_request = 421, // RFC7540, Section 9.1.2
    unprocessable_entity = 422, // RFC4918
    locked = 423, // RFC4918
    failed_dependency = 424, // RFC4918
    too_early = 425, // RFC8470
    upgrade_required = 426, // RFC7231, Section 6.5.15
    precondition_required = 428, // RFC6585
    too_many_requests = 429, // RFC6585
    request_header_fields_too_large = 431, // RFC6585
    unavailable_for_legal_reasons = 451, // RFC7725

    internal_server_error = 500, // RFC7231, Section 6.6.1
    not_implemented = 501, // RFC7231, Section 6.6.2
    bad_gateway = 502, // RFC7231, Section 6.6.3
    service_unavailable = 503, // RFC7231, Section 6.6.4
    gateway_timeout = 504, // RFC7231, Section 6.6.5
    http_version_not_supported = 505, // RFC7231, Section 6.6.6
    variant_also_negotiates = 506, // RFC2295
    insufficient_storage = 507, // RFC4918
    loop_detected = 508, // RFC5842
    not_extended = 510, // RFC2774
    network_authentication_required = 511, // RFC6585

    _,

    pub fn phrase(self: Status) ?[]const u8 {
        return switch (self) {
            // 1xx statuses
            .@"continue" => "Continue",
            .switching_protocols => "Switching Protocols",
            .processing => "Processing",
            .early_hints => "Early Hints",

            // 2xx statuses
            .ok => "OK",
            .created => "Created",
            .accepted => "Accepted",
            .non_authoritative_info => "Non-Authoritative Information",
            .no_content => "No Content",
            .reset_content => "Reset Content",
            .partial_content => "Partial Content",
            .multi_status => "Multi-Status",
            .already_reported => "Already Reported",
            .im_used => "IM Used",

            // 3xx statuses
            .multiple_choice => "Multiple Choices",
            .moved_permanently => "Moved Permanently",
            .found => "Found",
            .see_other => "See Other",
            .not_modified => "Not Modified",
            .use_proxy => "Use Proxy",
            .temporary_redirect => "Temporary Redirect",
            .permanent_redirect => "Permanent Redirect",

            // 4xx statuses
            .bad_request => "Bad Request",
            .unauthorized => "Unauthorized",
            .payment_required => "Payment Required",
            .forbidden => "Forbidden",
            .not_found => "Not Found",
            .method_not_allowed => "Method Not Allowed",
            .not_acceptable => "Not Acceptable",
            .proxy_auth_required => "Proxy Authentication Required",
            .request_timeout => "Request Timeout",
            .conflict => "Conflict",
            .gone => "Gone",
            .length_required => "Length Required",
            .precondition_failed => "Precondition Failed",
            .payload_too_large => "Payload Too Large",
            .uri_too_long => "URI Too Long",
            .unsupported_media_type => "Unsupported Media Type",
            .range_not_satisfiable => "Range Not Satisfiable",
            .expectation_failed => "Expectation Failed",
            .teapot => "I'm a teapot",
            .misdirected_request => "Misdirected Request",
            .unprocessable_entity => "Unprocessable Entity",
            .locked => "Locked",
            .failed_dependency => "Failed Dependency",
            .too_early => "Too Early",
            .upgrade_required => "Upgrade Required",
            .precondition_required => "Precondition Required",
            .too_many_requests => "Too Many Requests",
            .request_header_fields_too_large => "Request Header Fields Too Large",
            .unavailable_for_legal_reasons => "Unavailable For Legal Reasons",

            // 5xx statuses
            .internal_server_error => "Internal Server Error",
            .not_implemented => "Not Implemented",
            .bad_gateway => "Bad Gateway",
            .service_unavailable => "Service Unavailable",
            .gateway_timeout => "Gateway Timeout",
            .http_version_not_supported => "HTTP Version Not Supported",
            .variant_also_negotiates => "Variant Also Negotiates",
            .insufficient_storage => "Insufficient Storage",
            .loop_detected => "Loop Detected",
            .not_extended => "Not Extended",
            .network_authentication_required => "Network Authentication Required",

            else => return null,
        };
    }

    pub const Class = enum {
        informational,
        success,
        redirect,
        client_error,
        server_error,
    };

    pub fn class(self: Status) Class {
        return switch (@intFromEnum(self)) {
            100...199 => .informational,
            200...299 => .success,
            300...399 => .redirect,
            400...499 => .client_error,
            else => .server_error,
        };
    }

    test {
        try std.testing.expectEqualStrings("OK", Status.ok.phrase().?);
        try std.testing.expectEqualStrings("Not Found", Status.not_found.phrase().?);
    }

    test {
        try std.testing.expectEqual(Status.Class.success, Status.ok.class());
        try std.testing.expectEqual(Status.Class.client_error, Status.not_found.class());
    }
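
    // A small sketch of the non-exhaustive enum behavior: unnamed codes have
    // no phrase but are still classified by their numeric range.
    test {
        const unnamed: Status = @enumFromInt(599);
        try std.testing.expect(unnamed.phrase() == null);
        try std.testing.expectEqual(Status.Class.server_error, unnamed.class());
    }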
};

/// Compression is intentionally omitted here since it is handled in `ContentEncoding`.
pub const TransferEncoding = enum {
    chunked,
    none,
};

pub const ContentEncoding = enum {
    zstd,
    gzip,
    deflate,
    compress,
    identity,

    pub fn fromString(s: []const u8) ?ContentEncoding {
        const map = std.StaticStringMap(ContentEncoding).initComptime(.{
            .{ "zstd", .zstd },
            .{ "gzip", .gzip },
            .{ "x-gzip", .gzip },
            .{ "deflate", .deflate },
            .{ "compress", .compress },
            .{ "x-compress", .compress },
            .{ "identity", .identity },
        });
        return map.get(s);
    }

    pub fn minBufferCapacity(ce: ContentEncoding) usize {
        return switch (ce) {
            .zstd => std.compress.zstd.default_window_len,
            .gzip, .deflate => std.compress.flate.max_window_len,
            .compress, .identity => 0,
        };
    }
};
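
// A minimal sketch of `ContentEncoding.fromString`, including the legacy
// "x-" aliases; names outside the table (e.g. "br") yield null.
test {
    try std.testing.expectEqual(ContentEncoding.gzip, ContentEncoding.fromString("x-gzip").?);
    try std.testing.expectEqual(ContentEncoding.zstd, ContentEncoding.fromString("zstd").?);
    try std.testing.expect(ContentEncoding.fromString("br") == null);
}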

pub const Connection = enum {
    keep_alive,
    close,
};

pub const Header = struct {
    name: []const u8,
    value: []const u8,
};

pub const Reader = struct {
    in: *std.Io.Reader,
    /// This is preallocated memory that might be used by `bodyReader`. That
    /// function might return a pointer to this field, or a different
    /// `*std.Io.Reader`. It is advisable not to access this field directly.
    interface: std.Io.Reader,
    /// Keeps track of whether the stream is ready to accept a new request,
    /// making invalid API usage cause assertion failures rather than HTTP
    /// protocol violations.
    state: State,
    /// HTTP trailer bytes. These are at the end of a transfer-encoding:
    /// chunked message. This data is available only after calling one of the
    /// "end" functions and points to data inside the buffer of `in`, and is
    /// therefore invalidated on the next call to `receiveHead`, or any other
    /// read from `in`.
    trailers: []const u8 = &.{},
    body_err: ?BodyError = null,
    max_head_len: usize,

    /// Tracks progress through a chunked message body. Values other than the
    /// named tags hold the number of bytes left in the current chunk plus
    /// two for its trailing "\r\n".
    pub const RemainingChunkLen = enum(u64) {
        /// The next bytes in the stream are a chunk header.
        head = 0,
        /// Only the '\n' of the current chunk's trailing "\r\n" remains.
        n = 1,
        /// The current chunk's trailing "\r\n" remains.
        rn = 2,
        _,

        pub fn init(integer: u64) RemainingChunkLen {
            return @enumFromInt(integer);
        }

        pub fn int(rcl: RemainingChunkLen) u64 {
            return @intFromEnum(rcl);
        }
    };

    pub const State = union(enum) {
        /// The stream is available to be used for the first time, or reused.
        ready,
        received_head,
        /// The stream goes until the connection is closed.
        body_none,
        body_remaining_content_length: u64,
        body_remaining_chunk_len: RemainingChunkLen,
        /// The stream would be eligible for another HTTP request, however the
        /// client and server did not negotiate a persistent connection.
        closing,
    };

    pub const BodyError = error{
        HttpChunkInvalid,
        HttpChunkTruncated,
        HttpHeadersOversize,
    };

    pub const HeadError = error{
        /// Too many bytes of HTTP headers.
        ///
        /// The HTTP specification suggests responding with a 431 status code
        /// before closing the connection.
        HttpHeadersOversize,
        /// Partial HTTP request was received but the connection was closed
        /// before fully receiving the headers.
        HttpRequestTruncated,
        /// The client sent 0 bytes of headers before closing the stream. This
        /// happens when a keep-alive connection is finally closed.
        HttpConnectionClosing,
        /// Transitive error occurred reading from `in`.
        ReadFailed,
    };

    /// Buffers the entire head inside `in`.
    ///
    /// The resulting memory is invalidated by any subsequent consumption of
    /// the input stream.
    pub fn receiveHead(reader: *Reader) HeadError![]const u8 {
        reader.trailers = &.{};
        const in = reader.in;
        const max_head_len = reader.max_head_len;
        var hp: HeadParser = .{};
        var head_len: usize = 0;
        while (true) {
            if (head_len >= max_head_len) return error.HttpHeadersOversize;
            const remaining = in.buffered()[head_len..];
            if (remaining.len == 0) {
                in.fillMore() catch |err| switch (err) {
                    error.EndOfStream => switch (head_len) {
                        0 => return error.HttpConnectionClosing,
                        else => return error.HttpRequestTruncated,
                    },
                    error.ReadFailed => return error.ReadFailed,
                };
                continue;
            }
            head_len += hp.feed(remaining);
            if (hp.state == .finished) {
                reader.state = .received_head;
                const head_buffer = in.buffered()[0..head_len];
                in.toss(head_len);
                return head_buffer;
            }
        }
    }

    /// If a compressed body has been negotiated, this returns compressed bytes.
    ///
    /// Asserts it is called at most once, and only after `receiveHead`.
    ///
    /// See also:
    /// * `bodyReaderDecompressing`
    pub fn bodyReader(
        reader: *Reader,
        transfer_buffer: []u8,
        transfer_encoding: TransferEncoding,
        content_length: ?u64,
    ) *std.Io.Reader {
        assert(reader.state == .received_head);
        switch (transfer_encoding) {
            .chunked => {
                reader.state = .{ .body_remaining_chunk_len = .head };
                reader.interface = .{
                    .buffer = transfer_buffer,
                    .seek = 0,
                    .end = 0,
                    .vtable = &.{
                        .stream = chunkedStream,
                        .discard = chunkedDiscard,
                    },
                };
                return &reader.interface;
            },
            .none => {
                if (content_length) |len| {
                    reader.state = .{ .body_remaining_content_length = len };
                    reader.interface = .{
                        .buffer = transfer_buffer,
                        .seek = 0,
                        .end = 0,
                        .vtable = &.{
                            .stream = contentLengthStream,
                            .discard = contentLengthDiscard,
                        },
                    };
                    return &reader.interface;
                } else {
                    reader.state = .body_none;
                    return reader.in;
                }
            },
        }
    }

    /// If a compressed body has been negotiated, this returns decompressed bytes.
    ///
    /// Asserts it is called at most once, and only after `receiveHead`.
    ///
    /// See also:
    /// * `bodyReader`
    pub fn bodyReaderDecompressing(
        reader: *Reader,
        transfer_buffer: []u8,
        transfer_encoding: TransferEncoding,
        content_length: ?u64,
        content_encoding: ContentEncoding,
        decompress: *Decompress,
        decompress_buffer: []u8,
    ) *std.Io.Reader {
        if (transfer_encoding == .none and content_length == null) {
            assert(reader.state == .received_head);
            reader.state = .body_none;
            switch (content_encoding) {
                .identity => {
                    return reader.in;
                },
                .deflate => {
                    decompress.* = .{ .flate = .init(reader.in, .zlib, decompress_buffer) };
                    return &decompress.flate.reader;
                },
                .gzip => {
                    decompress.* = .{ .flate = .init(reader.in, .gzip, decompress_buffer) };
                    return &decompress.flate.reader;
                },
                .zstd => {
                    decompress.* = .{ .zstd = .init(reader.in, decompress_buffer, .{ .verify_checksum = false }) };
                    return &decompress.zstd.reader;
                },
                .compress => unreachable,
            }
        }
        const transfer_reader = bodyReader(reader, transfer_buffer, transfer_encoding, content_length);
        return decompress.init(transfer_reader, decompress_buffer, content_encoding);
    }

    fn contentLengthStream(
        io_r: *std.Io.Reader,
        w: *Writer,
        limit: std.Io.Limit,
    ) std.Io.Reader.StreamError!usize {
        const reader: *Reader = @alignCast(@fieldParentPtr("interface", io_r));
        const remaining_content_length = &reader.state.body_remaining_content_length;
        const remaining = remaining_content_length.*;
        if (remaining == 0) {
            reader.state = .ready;
            return error.EndOfStream;
        }
        const n = try reader.in.stream(w, limit.min(.limited64(remaining)));
        remaining_content_length.* = remaining - n;
        return n;
    }

    fn contentLengthDiscard(io_r: *std.Io.Reader, limit: std.Io.Limit) std.Io.Reader.Error!usize {
        const reader: *Reader = @alignCast(@fieldParentPtr("interface", io_r));
        const remaining_content_length = &reader.state.body_remaining_content_length;
        const remaining = remaining_content_length.*;
        if (remaining == 0) {
            reader.state = .ready;
            return error.EndOfStream;
        }
        const n = try reader.in.discard(limit.min(.limited64(remaining)));
        remaining_content_length.* = remaining - n;
        return n;
    }

    fn chunkedStream(io_r: *std.Io.Reader, w: *Writer, limit: std.Io.Limit) std.Io.Reader.StreamError!usize {
        const reader: *Reader = @alignCast(@fieldParentPtr("interface", io_r));
        const chunk_len_ptr = switch (reader.state) {
            .ready => return error.EndOfStream,
            .body_remaining_chunk_len => |*x| x,
            else => unreachable,
        };
        return chunkedReadEndless(reader, w, limit, chunk_len_ptr) catch |err| switch (err) {
            error.ReadFailed => return error.ReadFailed,
            error.WriteFailed => return error.WriteFailed,
            error.EndOfStream => {
                reader.body_err = error.HttpChunkTruncated;
                return error.ReadFailed;
            },
            else => |e| {
                reader.body_err = e;
                return error.ReadFailed;
            },
        };
    }

    fn chunkedReadEndless(
        reader: *Reader,
        w: *Writer,
        limit: std.Io.Limit,
        chunk_len_ptr: *RemainingChunkLen,
    ) (BodyError || std.Io.Reader.StreamError)!usize {
        const in = reader.in;
        len: switch (chunk_len_ptr.*) {
            .head => {
                var cp: ChunkParser = .init;
                while (true) {
                    const i = cp.feed(in.buffered());
                    switch (cp.state) {
                        .invalid => return error.HttpChunkInvalid,
                        .data => {
                            in.toss(i);
                            break;
                        },
                        else => {
                            in.toss(i);
                            try in.fillMore();
                            continue;
                        },
                    }
                }
                if (cp.chunk_len == 0) return parseTrailers(reader, 0);
                const n = try in.stream(w, limit.min(.limited64(cp.chunk_len)));
                chunk_len_ptr.* = .init(cp.chunk_len + 2 - n);
                return n;
            },
            .n => {
                if ((try in.peekByte()) != '\n') return error.HttpChunkInvalid;
                in.toss(1);
                continue :len .head;
            },
            .rn => {
                const rn = try in.peekArray(2);
                if (rn[0] != '\r' or rn[1] != '\n') return error.HttpChunkInvalid;
                in.toss(2);
                continue :len .head;
            },
            else => |remaining_chunk_len| {
                const n = try in.stream(w, limit.min(.limited64(@intFromEnum(remaining_chunk_len) - 2)));
                chunk_len_ptr.* = .init(@intFromEnum(remaining_chunk_len) - n);
                return n;
            },
        }
    }

    fn chunkedDiscard(io_r: *std.Io.Reader, limit: std.Io.Limit) std.Io.Reader.Error!usize {
        const reader: *Reader = @alignCast(@fieldParentPtr("interface", io_r));
        const chunk_len_ptr = switch (reader.state) {
            .ready => return error.EndOfStream,
            .body_remaining_chunk_len => |*x| x,
            else => unreachable,
        };
        return chunkedDiscardEndless(reader, limit, chunk_len_ptr) catch |err| switch (err) {
            error.ReadFailed => return error.ReadFailed,
            error.EndOfStream => {
                reader.body_err = error.HttpChunkTruncated;
                return error.ReadFailed;
            },
            else => |e| {
                reader.body_err = e;
                return error.ReadFailed;
            },
        };
    }

    fn chunkedDiscardEndless(
        reader: *Reader,
        limit: std.Io.Limit,
        chunk_len_ptr: *RemainingChunkLen,
    ) (BodyError || std.Io.Reader.Error)!usize {
        const in = reader.in;
        len: switch (chunk_len_ptr.*) {
            .head => {
                var cp: ChunkParser = .init;
                while (true) {
                    const i = cp.feed(in.buffered());
                    switch (cp.state) {
                        .invalid => return error.HttpChunkInvalid,
                        .data => {
                            in.toss(i);
                            break;
                        },
                        else => {
                            in.toss(i);
                            try in.fillMore();
                            continue;
                        },
                    }
                }
                if (cp.chunk_len == 0) return parseTrailers(reader, 0);
                const n = try in.discard(limit.min(.limited64(cp.chunk_len)));
                chunk_len_ptr.* = .init(cp.chunk_len + 2 - n);
                return n;
            },
            .n => {
                if ((try in.peekByte()) != '\n') return error.HttpChunkInvalid;
                in.toss(1);
                continue :len .head;
            },
            .rn => {
                const rn = try in.peekArray(2);
                if (rn[0] != '\r' or rn[1] != '\n') return error.HttpChunkInvalid;
                in.toss(2);
                continue :len .head;
            },
            else => |remaining_chunk_len| {
                const n = try in.discard(limit.min(.limited64(remaining_chunk_len.int() - 2)));
                chunk_len_ptr.* = .init(remaining_chunk_len.int() - n);
                return n;
            },
        }
    }

    /// Called when the next bytes in the stream are trailers, or "\r\n" to
    /// indicate the end of a chunked body.
    fn parseTrailers(reader: *Reader, amt_read: usize) (BodyError || std.Io.Reader.Error)!usize {
        const in = reader.in;
        const rn = try in.peekArray(2);
        if (rn[0] == '\r' and rn[1] == '\n') {
            in.toss(2);
            reader.state = .ready;
            assert(reader.trailers.len == 0);
            return amt_read;
        }
        var hp: HeadParser = .{ .state = .seen_rn };
        var trailers_len: usize = 2;
        while (true) {
            if (in.buffer.len - trailers_len == 0) return error.HttpHeadersOversize;
            const remaining = in.buffered()[trailers_len..];
            if (remaining.len == 0) {
                try in.fillMore();
                continue;
            }
            trailers_len += hp.feed(remaining);
            if (hp.state == .finished) {
                reader.state = .ready;
                reader.trailers = in.buffered()[0..trailers_len];
                in.toss(trailers_len);
                return amt_read;
            }
        }
    }
};
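
// A small sketch of the `Reader.RemainingChunkLen` encoding: named tags map
// to 0, 1, and 2; larger values carry chunk bytes remaining plus the CRLF.
test {
    try std.testing.expectEqual(Reader.RemainingChunkLen.head, Reader.RemainingChunkLen.init(0));
    try std.testing.expectEqual(@as(u64, 7), Reader.RemainingChunkLen.init(7).int());
}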

pub const Decompress = union(enum) {
    flate: std.compress.flate.Decompress,
    zstd: std.compress.zstd.Decompress,
    none: *std.Io.Reader,

    pub fn init(
        decompress: *Decompress,
        transfer_reader: *std.Io.Reader,
        buffer: []u8,
        content_encoding: ContentEncoding,
    ) *std.Io.Reader {
        switch (content_encoding) {
            .identity => {
                decompress.* = .{ .none = transfer_reader };
                return transfer_reader;
            },
            .deflate => {
                decompress.* = .{ .flate = .init(transfer_reader, .zlib, buffer) };
                return &decompress.flate.reader;
            },
            .gzip => {
                decompress.* = .{ .flate = .init(transfer_reader, .gzip, buffer) };
                return &decompress.flate.reader;
            },
            .zstd => {
                decompress.* = .{ .zstd = .init(transfer_reader, buffer, .{ .verify_checksum = false }) };
                return &decompress.zstd.reader;
            },
            .compress => unreachable,
        }
    }
};
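
// A minimal sketch of `Decompress.init` with the `identity` encoding: no
// decompression state is engaged and the transfer reader is returned as-is.
// Assumes `std.Io.Reader.fixed` for an in-memory stream.
test {
    var in: std.Io.Reader = .fixed("plain body");
    var decompress_buffer: [0]u8 = undefined;
    var decompress: Decompress = undefined;
    const body = decompress.init(&in, &decompress_buffer, .identity);
    try std.testing.expect(body == &in);
}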

/// Request or response body.
pub const BodyWriter = struct {
    /// Until the lifetime of `BodyWriter` ends, it is illegal to modify this
    /// writer's state other than via methods of `BodyWriter`.
    http_protocol_output: *Writer,
    state: State,
    writer: Writer,

    pub const Error = Writer.Error;

    /// How many zeroes to reserve for the hex-encoded chunk length.
    const chunk_len_digits = 8;
    const max_chunk_len: usize = std.math.pow(u64, 16, chunk_len_digits) - 1;
    const chunk_header_template = ("0" ** chunk_len_digits) ++ "\r\n";

    comptime {
        assert(max_chunk_len == std.math.maxInt(u32));
    }

    pub const State = union(enum) {
        /// End of connection signals the end of the stream.
        none,
        /// As a debugging utility, counts down to zero as bytes are written.
        content_length: u64,
        /// Each chunk is wrapped in a header and trailer.
        /// This length is the number of bytes to be written before the
        /// next header. This includes +2 for the `\r\n` trailer and is zero
        /// for the beginning of the stream.
        chunk_len: usize,
        /// Cleanly finished stream; connection can be reused.
        end,

        pub const init_chunked: State = .{ .chunk_len = 0 };
    };

    pub fn isEliding(w: *const BodyWriter) bool {
        return w.writer.vtable.drain == elidingDrain;
    }

    /// Sends all buffered data across `BodyWriter.http_protocol_output`.
    pub fn flush(w: *BodyWriter) Error!void {
        const out = w.http_protocol_output;
        switch (w.state) {
            .end, .none, .content_length, .chunk_len => return out.flush(),
        }
    }

    /// When using content-length, asserts that the amount of data sent matches
    /// the value sent in the header, then flushes `http_protocol_output`.
    ///
    /// When using transfer-encoding: chunked, writes the end-of-stream message
    /// with empty trailers, then flushes the stream to the system. Asserts any
    /// started chunk has been completely finished.
    ///
    /// Respects the value of `isEliding` to omit all data after the headers.
    ///
    /// See also:
    /// * `endUnflushed`
    /// * `endChunked`
    pub fn end(w: *BodyWriter) Error!void {
        try endUnflushed(w);
        try w.http_protocol_output.flush();
    }

    /// When using content-length, asserts that the amount of data sent matches
    /// the value sent in the header.
    ///
    /// Otherwise, transfer-encoding: chunked is being used, and it writes the
    /// end-of-stream message with empty trailers.
    ///
    /// Respects the value of `isEliding` to omit all data after the headers.
    ///
    /// Does not flush `http_protocol_output`, but does flush `writer`.
    ///
    /// See also:
    /// * `end`
    /// * `endChunked`
    pub fn endUnflushed(w: *BodyWriter) Error!void {
        try w.writer.flush();
        switch (w.state) {
            .end => unreachable,
            .content_length => |len| {
                assert(len == 0); // Trips when end() called before all bytes written.
                w.state = .end;
            },
            .none => {},
            .chunk_len => return endChunkedUnflushed(w, .{}),
        }
    }

    pub const EndChunkedOptions = struct {
        trailers: []const Header = &.{},
    };

    /// Writes the end-of-stream message and any optional trailers, flushing
    /// the underlying stream.
    ///
    /// Asserts that the BodyWriter is using transfer-encoding: chunked.
    ///
    /// Respects the value of `isEliding` to omit all data after the headers.
    ///
    /// See also:
    /// * `endChunkedUnflushed`
    /// * `end`
    pub fn endChunked(w: *BodyWriter, options: EndChunkedOptions) Error!void {
        try endChunkedUnflushed(w, options);
        try w.http_protocol_output.flush();
    }

    /// Writes the end-of-stream message and any optional trailers.
    ///
    /// Does not flush.
    ///
    /// Asserts that the BodyWriter is using transfer-encoding: chunked.
    ///
    /// Respects the value of `isEliding` to omit all data after the headers.
    ///
    /// See also:
    /// * `endChunked`
    /// * `endUnflushed`
    /// * `end`
    pub fn endChunkedUnflushed(w: *BodyWriter, options: EndChunkedOptions) Error!void {
        if (w.isEliding()) {
            w.state = .end;
            return;
        }
        const bw = w.http_protocol_output;
        switch (w.state.chunk_len) {
            0 => {},
            1 => unreachable, // Wrote more data than specified in chunk header.
            2 => try bw.writeAll("\r\n"),
            else => unreachable, // An earlier write call indicated more data would follow.
        }
        try bw.writeAll("0\r\n");
        for (options.trailers) |trailer| {
            try bw.writeAll(trailer.name);
            try bw.writeAll(": ");
            try bw.writeAll(trailer.value);
            try bw.writeAll("\r\n");
        }
        try bw.writeAll("\r\n");
        w.state = .end;
    }

    pub fn contentLengthDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
        const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
        assert(!bw.isEliding());
        const out = bw.http_protocol_output;
        const n = try out.writeSplatHeader(w.buffered(), data, splat);
        bw.state.content_length -= n;
        return w.consume(n);
    }

    pub fn noneDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
        const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
        assert(!bw.isEliding());
        const out = bw.http_protocol_output;
        const n = try out.writeSplatHeader(w.buffered(), data, splat);
        return w.consume(n);
    }

    pub fn elidingDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
        const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
        const slice = data[0 .. data.len - 1];
        const pattern = data[slice.len];
        var written: usize = pattern.len * splat;
        for (slice) |bytes| written += bytes.len;
        switch (bw.state) {
            .content_length => |*len| len.* -= written + w.end,
            else => {},
        }
        w.end = 0;
        return written;
    }

    pub fn elidingSendFile(w: *Writer, file_reader: *File.Reader, limit: std.Io.Limit) Writer.FileError!usize {
        const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
        if (File.Handle == void) return error.Unimplemented;
        if (builtin.zig_backend == .stage2_aarch64) return error.Unimplemented;
        switch (bw.state) {
            .content_length => |*len| len.* -= w.end,
            else => {},
        }
        w.end = 0;
        if (limit == .nothing) return 0;
        if (file_reader.getSize()) |size| {
            const n = limit.minInt64(size - file_reader.pos);
            if (n == 0) return error.EndOfStream;
            file_reader.seekBy(@intCast(n)) catch return error.Unimplemented;
            switch (bw.state) {
                .content_length => |*len| len.* -= n,
                else => {},
            }
            return n;
        } else |_| {
            // Error is observable on `file_reader` instance, and it is better to
            // treat the file as a pipe.
            return error.Unimplemented;
        }
    }

    pub fn noneSendFile(w: *Writer, file_reader: *File.Reader, limit: std.Io.Limit) Writer.FileError!usize {
        const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
        assert(!bw.isEliding());
        const out = bw.http_protocol_output;
        const n = try out.sendFileHeader(w.buffered(), file_reader, limit);
        return w.consume(n);
    }

    pub fn contentLengthSendFile(w: *Writer, file_reader: *File.Reader, limit: std.Io.Limit) Writer.FileError!usize {
        const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
        assert(!bw.isEliding());
        const out = bw.http_protocol_output;
        const n = try out.sendFileHeader(w.buffered(), file_reader, limit);
        bw.state.content_length -= n;
        return w.consume(n);
    }

    pub fn chunkedSendFile(w: *Writer, file_reader: *File.Reader, limit: std.Io.Limit) Writer.FileError!usize {
        const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
        assert(!bw.isEliding());
        const data_len = Writer.countSendFileLowerBound(w.end, file_reader, limit) orelse {
            // If the file size is unknown, we cannot lower to a `sendFile` since we would
            // have to flush the chunk header before knowing the chunk length.
            return error.Unimplemented;
        };
        if (data_len == 0) return error.EndOfStream;
        const out = bw.http_protocol_output;
        l: switch (bw.state.chunk_len) {
            0 => {
                const header_buf = try out.writableArray(chunk_header_template.len);
                @memcpy(header_buf, chunk_header_template);
                writeHex(header_buf[0..chunk_len_digits], data_len);
                bw.state.chunk_len = data_len + 2;
                continue :l bw.state.chunk_len;
            },
            1 => unreachable, // Wrote more data than specified in chunk header.
            2 => {
                try out.writeAll("\r\n");
                bw.state.chunk_len = 0;
                continue :l 0;
            },
            else => {
                const chunk_limit: std.Io.Limit = .limited(bw.state.chunk_len - 2);
                const n = if (chunk_limit.subtract(w.buffered().len)) |sendfile_limit|
                    try out.sendFileHeader(w.buffered(), file_reader, sendfile_limit.min(limit))
                else
                    try out.write(chunk_limit.slice(w.buffered()));
                bw.state.chunk_len -= n;
                return w.consume(n);
            },
        }
    }

    pub fn chunkedDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
        const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
        assert(!bw.isEliding());
        const out = bw.http_protocol_output;
        const data_len = w.end + Writer.countSplat(data, splat);
        l: switch (bw.state.chunk_len) {
            0 => {
                const header_buf = try out.writableArray(chunk_header_template.len);
                @memcpy(header_buf, chunk_header_template);
                writeHex(header_buf[0..chunk_len_digits], data_len);
                bw.state.chunk_len = data_len + 2;
                continue :l bw.state.chunk_len;
            },
            1 => unreachable, // Wrote more data than specified in chunk header.
            2 => {
                try out.writeAll("\r\n");
                bw.state.chunk_len = 0;
                continue :l 0;
            },
            else => {
                const n = try out.writeSplatHeaderLimit(w.buffered(), data, splat, .limited(bw.state.chunk_len - 2));
                bw.state.chunk_len -= n;
                return w.consume(n);
            },
        }
    }

    /// Writes an integer in base 16 to `buf`, right-aligned, assuming the
    /// buffer has already been filled with zeroes.
    fn writeHex(buf: []u8, x: usize) void {
        assert(std.mem.allEqual(u8, buf, '0'));
        const base = 16;
        var index: usize = buf.len;
        var a = x;
        while (a > 0) {
            const digit = a % base;
            index -= 1;
            buf[index] = std.fmt.digitToChar(@intCast(digit), .lower);
            a /= base;
        }
    }
};
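
// A quick check of `BodyWriter.writeHex` (non-pub, but accessible within
// this file): digits land right-aligned in a zero-filled buffer.
test {
    var buf: [BodyWriter.chunk_len_digits]u8 = ("0" ** BodyWriter.chunk_len_digits).*;
    BodyWriter.writeHex(&buf, 0xabc);
    try std.testing.expectEqualStrings("00000abc", &buf);
}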

test {
    _ = Server;
    _ = Status;
    _ = Method;
    _ = ChunkParser;
    _ = HeadParser;

    if (builtin.os.tag != .wasi) {
        _ = Client;
        _ = @import("http/test.zig");
    }
}