Commit 3837862e52

Andrew Kelley <andrew@ziglang.org>
2025-08-05 05:35:07
fix 32-bit builds
1 parent abd7693
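The breakage is specific to targets where usize is 32 bits wide, and two patterns show up in the diff. The @fieldParentPtr call sites presumably fail because the embedded interface field (a std.Io.Writer, std.Io.Reader, pool node, or connection struct) can have a smaller alignment than its parent struct on such targets, so the computed parent pointer needs an explicit @alignCast back to the parent's alignment. The std.Io.Limit call sites fail because the lengths being limited are u64 values that no longer coerce to usize. A minimal sketch of the alignment case, using hypothetical Writer and Client stand-ins rather than the real std types:

    const std = @import("std");

    // Hypothetical stand-in for an interface struct such as std.Io.Writer:
    // only pointer-sized fields, so it is 4-byte aligned on a 32-bit target.
    const Writer = struct {
        buffer: []const u8,
        end: usize,
    };

    // Hypothetical parent struct. The align(8) field raises the struct's
    // alignment above that of the embedded interface field on 32-bit targets.
    const Client = struct {
        seq: u64 align(8),
        writer: Writer,
    };

    fn parentOf(w: *Writer) *Client {
        // The pointer computed by @fieldParentPtr may only be provably aligned
        // to the field's alignment (4 here on a 32-bit target), while *Client
        // requires align(8). The explicit @alignCast reasserts the parent's
        // alignment and is a safety-checked no-op where the alignments agree.
        return @alignCast(@fieldParentPtr("writer", w));
    }

    test parentOf {
        var c: Client = .{ .seq = 0, .writer = .{ .buffer = &.{}, .end = 0 } };
        try std.testing.expect(parentOf(&c.writer) == &c);
    }

Because the cast is a no-op when the alignments already match, the same call sites keep working unchanged on 64-bit targets.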
Changed files (3)
lib/std/crypto/tls/Client.zig
@@ -910,7 +910,7 @@ pub fn init(input: *Reader, output: *Writer, options: Options) InitError!Client
 }
 
 fn drain(w: *Writer, data: []const []const u8, splat: usize) Writer.Error!usize {
-    const c: *Client = @fieldParentPtr("writer", w);
+    const c: *Client = @alignCast(@fieldParentPtr("writer", w));
     if (true) @panic("update to use the buffer and flush");
     const sliced_data = if (splat == 0) data[0..data.len -| 1] else data;
     const output = c.output;
@@ -1046,7 +1046,7 @@ pub fn eof(c: Client) bool {
 }
 
 fn stream(r: *Reader, w: *Writer, limit: std.io.Limit) Reader.StreamError!usize {
-    const c: *Client = @fieldParentPtr("reader", r);
+    const c: *Client = @alignCast(@fieldParentPtr("reader", r));
     if (c.eof()) return error.EndOfStream;
     const input = c.input;
     // If at least one full encrypted record is not buffered, read once.
lib/std/http/Client.zig
@@ -82,7 +82,7 @@ pub const ConnectionPool = struct {
 
         var next = pool.free.last;
         while (next) |node| : (next = node.prev) {
-            const connection: *Connection = @fieldParentPtr("pool_node", node);
+            const connection: *Connection = @alignCast(@fieldParentPtr("pool_node", node));
             if (connection.protocol != criteria.protocol) continue;
             if (connection.port != criteria.port) continue;
 
@@ -127,7 +127,7 @@ pub const ConnectionPool = struct {
         if (connection.closing or pool.free_size == 0) return connection.destroy();
 
         if (pool.free_len >= pool.free_size) {
-            const popped: *Connection = @fieldParentPtr("pool_node", pool.free.popFirst().?);
+            const popped: *Connection = @alignCast(@fieldParentPtr("pool_node", pool.free.popFirst().?));
             pool.free_len -= 1;
 
             popped.destroy();
@@ -183,14 +183,14 @@ pub const ConnectionPool = struct {
 
         var next = pool.free.first;
         while (next) |node| {
-            const connection: *Connection = @fieldParentPtr("pool_node", node);
+            const connection: *Connection = @alignCast(@fieldParentPtr("pool_node", node));
             next = node.next;
             connection.destroy();
         }
 
         next = pool.used.first;
         while (next) |node| {
-            const connection: *Connection = @fieldParentPtr("pool_node", node);
+            const connection: *Connection = @alignCast(@fieldParentPtr("pool_node", node));
             next = node.next;
             connection.destroy();
         }
@@ -366,11 +366,11 @@ pub const Connection = struct {
         return switch (c.protocol) {
             .tls => {
                 if (disable_tls) unreachable;
-                const tls: *Tls = @fieldParentPtr("connection", c);
+                const tls: *Tls = @alignCast(@fieldParentPtr("connection", c));
                 return tls.host();
             },
             .plain => {
-                const plain: *Plain = @fieldParentPtr("connection", c);
+                const plain: *Plain = @alignCast(@fieldParentPtr("connection", c));
                 return plain.host();
             },
         };
@@ -383,11 +383,11 @@ pub const Connection = struct {
         switch (c.protocol) {
             .tls => {
                 if (disable_tls) unreachable;
-                const tls: *Tls = @fieldParentPtr("connection", c);
+                const tls: *Tls = @alignCast(@fieldParentPtr("connection", c));
                 tls.destroy();
             },
             .plain => {
-                const plain: *Plain = @fieldParentPtr("connection", c);
+                const plain: *Plain = @alignCast(@fieldParentPtr("connection", c));
                 plain.destroy();
             },
         }
@@ -399,7 +399,7 @@ pub const Connection = struct {
         return switch (c.protocol) {
             .tls => {
                 if (disable_tls) unreachable;
-                const tls: *Tls = @fieldParentPtr("connection", c);
+                const tls: *Tls = @alignCast(@fieldParentPtr("connection", c));
                 return &tls.client.writer;
             },
             .plain => &c.stream_writer.interface,
@@ -412,7 +412,7 @@ pub const Connection = struct {
         return switch (c.protocol) {
             .tls => {
                 if (disable_tls) unreachable;
-                const tls: *Tls = @fieldParentPtr("connection", c);
+                const tls: *Tls = @alignCast(@fieldParentPtr("connection", c));
                 return &tls.client.reader;
             },
             .plain => c.stream_reader.interface(),
@@ -422,7 +422,7 @@ pub const Connection = struct {
     pub fn flush(c: *Connection) Writer.Error!void {
         if (c.protocol == .tls) {
             if (disable_tls) unreachable;
-            const tls: *Tls = @fieldParentPtr("connection", c);
+            const tls: *Tls = @alignCast(@fieldParentPtr("connection", c));
             try tls.client.writer.flush();
         }
         try c.stream_writer.interface.flush();
@@ -434,7 +434,7 @@ pub const Connection = struct {
     pub fn end(c: *Connection) Writer.Error!void {
         if (c.protocol == .tls) {
             if (disable_tls) unreachable;
-            const tls: *Tls = @fieldParentPtr("connection", c);
+            const tls: *Tls = @alignCast(@fieldParentPtr("connection", c));
             try tls.client.end();
             try tls.client.writer.flush();
         }
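The lib/std/http.zig changes below are the second pattern. Body and chunk lengths are tracked as u64, and a u64 coerces to the usize parameter of Limit.limited only when usize is itself 64 bits wide, so these call sites fail to compile on 32-bit targets. The new limited64 presumably accepts the full u64 and clamps it to what a usize-based limit can represent, along the lines of this hypothetical helper:

    const std = @import("std");

    // Hypothetical helper mirroring what a u64-accepting limit constructor has
    // to do on a 32-bit target: clamp the 64-bit length to what fits in usize.
    fn clampToUsize(len: u64) usize {
        return std.math.cast(usize, len) orelse std.math.maxInt(usize);
    }

    test clampToUsize {
        try std.testing.expectEqual(@as(usize, 1234), clampToUsize(1234));
        // A length above 4 GiB saturates on 32-bit targets and passes through
        // unchanged on 64-bit targets; either way it never truncates.
        const big: u64 = 1 << 40;
        try std.testing.expect(clampToUsize(big) <= big);
    }

Saturating rather than truncating keeps large Content-Length values meaningful on 32-bit hosts: each stream or discard call is bounded by the largest representable limit, and the remaining u64 count is decremented by however much was actually transferred.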
lib/std/http.zig
@@ -519,33 +519,33 @@ pub const Reader = struct {
         w: *Writer,
         limit: std.Io.Limit,
     ) std.Io.Reader.StreamError!usize {
-        const reader: *Reader = @fieldParentPtr("interface", io_r);
+        const reader: *Reader = @alignCast(@fieldParentPtr("interface", io_r));
         const remaining_content_length = &reader.state.body_remaining_content_length;
         const remaining = remaining_content_length.*;
         if (remaining == 0) {
             reader.state = .ready;
             return error.EndOfStream;
         }
-        const n = try reader.in.stream(w, limit.min(.limited(remaining)));
+        const n = try reader.in.stream(w, limit.min(.limited64(remaining)));
         remaining_content_length.* = remaining - n;
         return n;
     }
 
     fn contentLengthDiscard(io_r: *std.Io.Reader, limit: std.Io.Limit) std.Io.Reader.Error!usize {
-        const reader: *Reader = @fieldParentPtr("interface", io_r);
+        const reader: *Reader = @alignCast(@fieldParentPtr("interface", io_r));
         const remaining_content_length = &reader.state.body_remaining_content_length;
         const remaining = remaining_content_length.*;
         if (remaining == 0) {
             reader.state = .ready;
             return error.EndOfStream;
         }
-        const n = try reader.in.discard(limit.min(.limited(remaining)));
+        const n = try reader.in.discard(limit.min(.limited64(remaining)));
         remaining_content_length.* = remaining - n;
         return n;
     }
 
     fn chunkedStream(io_r: *std.Io.Reader, w: *Writer, limit: std.Io.Limit) std.Io.Reader.StreamError!usize {
-        const reader: *Reader = @fieldParentPtr("interface", io_r);
+        const reader: *Reader = @alignCast(@fieldParentPtr("interface", io_r));
         const chunk_len_ptr = switch (reader.state) {
             .ready => return error.EndOfStream,
             .body_remaining_chunk_len => |*x| x,
@@ -591,7 +591,7 @@ pub const Reader = struct {
                     }
                 }
                 if (cp.chunk_len == 0) return parseTrailers(reader, 0);
-                const n = try in.stream(w, limit.min(.limited(cp.chunk_len)));
+                const n = try in.stream(w, limit.min(.limited64(cp.chunk_len)));
                 chunk_len_ptr.* = .init(cp.chunk_len + 2 - n);
                 return n;
             },
@@ -607,7 +607,7 @@ pub const Reader = struct {
                 continue :len .head;
             },
             else => |remaining_chunk_len| {
-                const n = try in.stream(w, limit.min(.limited(@intFromEnum(remaining_chunk_len) - 2)));
+                const n = try in.stream(w, limit.min(.limited64(@intFromEnum(remaining_chunk_len) - 2)));
                 chunk_len_ptr.* = .init(@intFromEnum(remaining_chunk_len) - n);
                 return n;
             },
@@ -615,7 +615,7 @@ pub const Reader = struct {
     }
 
     fn chunkedDiscard(io_r: *std.Io.Reader, limit: std.Io.Limit) std.Io.Reader.Error!usize {
-        const reader: *Reader = @fieldParentPtr("interface", io_r);
+        const reader: *Reader = @alignCast(@fieldParentPtr("interface", io_r));
         const chunk_len_ptr = switch (reader.state) {
             .ready => return error.EndOfStream,
             .body_remaining_chunk_len => |*x| x,
@@ -659,7 +659,7 @@ pub const Reader = struct {
                     }
                 }
                 if (cp.chunk_len == 0) return parseTrailers(reader, 0);
-                const n = try in.discard(limit.min(.limited(cp.chunk_len)));
+                const n = try in.discard(limit.min(.limited64(cp.chunk_len)));
                 chunk_len_ptr.* = .init(cp.chunk_len + 2 - n);
                 return n;
             },
@@ -675,7 +675,7 @@ pub const Reader = struct {
                 continue :len .head;
             },
             else => |remaining_chunk_len| {
-                const n = try in.discard(limit.min(.limited(remaining_chunk_len.int() - 2)));
+                const n = try in.discard(limit.min(.limited64(remaining_chunk_len.int() - 2)));
                 chunk_len_ptr.* = .init(remaining_chunk_len.int() - n);
                 return n;
             },
@@ -758,7 +758,7 @@ pub const BodyWriter = struct {
 
     /// How many zeroes to reserve for hex-encoded chunk length.
     const chunk_len_digits = 8;
-    const max_chunk_len: usize = std.math.pow(usize, 16, chunk_len_digits) - 1;
+    const max_chunk_len: usize = std.math.pow(u64, 16, chunk_len_digits) - 1;
     const chunk_header_template = ("0" ** chunk_len_digits) ++ "\r\n";
 
     comptime {
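The max_chunk_len change is an overflow fix rather than a coercion fix. With chunk_len_digits = 8, 16^8 equals 4,294,967,296, one more than a 32-bit usize can hold, so evaluating the power in usize overflows during comptime evaluation on 32-bit targets. Computing it as u64 first and subtracting 1 afterwards yields 0xFFFF_FFFF, which fits usize on every target, as this small check illustrates:

    const std = @import("std");

    // Mirrors the fixed declaration: the power is evaluated as u64, and only
    // the final value (0xFFFF_FFFF) has to fit in usize.
    const max_chunk_len: usize = std.math.pow(u64, 16, 8) - 1;

    test "max_chunk_len fits a 32-bit usize" {
        try std.testing.expectEqual(@as(usize, 0xFFFF_FFFF), max_chunk_len);
    }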
@@ -918,7 +918,7 @@ pub const BodyWriter = struct {
     }
 
     pub fn contentLengthDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
-        const bw: *BodyWriter = @fieldParentPtr("writer", w);
+        const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
         assert(!bw.isEliding());
         const out = bw.http_protocol_output;
         const n = try out.writeSplatHeader(w.buffered(), data, splat);
@@ -927,7 +927,7 @@ pub const BodyWriter = struct {
     }
 
     pub fn noneDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
-        const bw: *BodyWriter = @fieldParentPtr("writer", w);
+        const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
         assert(!bw.isEliding());
         const out = bw.http_protocol_output;
         const n = try out.writeSplatHeader(w.buffered(), data, splat);
@@ -935,7 +935,7 @@ pub const BodyWriter = struct {
     }
 
     pub fn elidingDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
-        const bw: *BodyWriter = @fieldParentPtr("writer", w);
+        const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
         const slice = data[0 .. data.len - 1];
         const pattern = data[slice.len];
         var written: usize = pattern.len * splat;
@@ -949,7 +949,7 @@ pub const BodyWriter = struct {
     }
 
     pub fn elidingSendFile(w: *Writer, file_reader: *File.Reader, limit: std.Io.Limit) Writer.FileError!usize {
-        const bw: *BodyWriter = @fieldParentPtr("writer", w);
+        const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
         if (File.Handle == void) return error.Unimplemented;
         if (builtin.zig_backend == .stage2_aarch64) return error.Unimplemented;
         switch (bw.state) {
@@ -976,7 +976,7 @@ pub const BodyWriter = struct {
 
     /// Returns `null` if size cannot be computed without making any syscalls.
     pub fn noneSendFile(w: *Writer, file_reader: *File.Reader, limit: std.Io.Limit) Writer.FileError!usize {
-        const bw: *BodyWriter = @fieldParentPtr("writer", w);
+        const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
         assert(!bw.isEliding());
         const out = bw.http_protocol_output;
         const n = try out.sendFileHeader(w.buffered(), file_reader, limit);
@@ -984,7 +984,7 @@ pub const BodyWriter = struct {
     }
 
     pub fn contentLengthSendFile(w: *Writer, file_reader: *File.Reader, limit: std.Io.Limit) Writer.FileError!usize {
-        const bw: *BodyWriter = @fieldParentPtr("writer", w);
+        const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
         assert(!bw.isEliding());
         const out = bw.http_protocol_output;
         const n = try out.sendFileHeader(w.buffered(), file_reader, limit);
@@ -993,7 +993,7 @@ pub const BodyWriter = struct {
     }
 
     pub fn chunkedSendFile(w: *Writer, file_reader: *File.Reader, limit: std.Io.Limit) Writer.FileError!usize {
-        const bw: *BodyWriter = @fieldParentPtr("writer", w);
+        const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
         assert(!bw.isEliding());
         const data_len = Writer.countSendFileLowerBound(w.end, file_reader, limit) orelse {
             // If the file size is unknown, we cannot lower to a `sendFile` since we would
@@ -1041,7 +1041,7 @@ pub const BodyWriter = struct {
     }
 
     pub fn chunkedDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
-        const bw: *BodyWriter = @fieldParentPtr("writer", w);
+        const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
         assert(!bw.isEliding());
         const out = bw.http_protocol_output;
         const data_len = w.end + Writer.countSplat(data, splat);