Commit f7bbcb4a4b
Changed files (8)
lib/std/fs/Dir.zig
@@ -1356,6 +1356,11 @@ pub fn openDir(self: Dir, sub_path: []const u8, args: OpenOptions) OpenError!Dir
error.FileLocksNotSupported => unreachable, // locking folders is not supported
error.WouldBlock => unreachable, // can't happen for directories
error.FileBusy => unreachable, // can't happen for directories
+ error.SharingViolation => unreachable,
+ error.PipeBusy => unreachable,
+ error.ProcessNotFound => unreachable,
+ error.AntivirusInterference => unreachable,
+
else => |e| return e,
};
return .{ .fd = fd };
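Aside: the added prongs follow openDir's existing convention of asserting impossible errors `unreachable` so the wider low-level error set never leaks into OpenError. A minimal sketch of that narrowing pattern, with made-up error names standing in for the real POSIX set:

```zig
const OpenError = error{AccessDenied};

// Stand-in for a lower-level call whose error set is wider than OpenError.
fn openRaw() (OpenError || error{WouldBlock})!void {}

fn openNarrowed() OpenError!void {
    openRaw() catch |err| switch (err) {
        error.WouldBlock => unreachable, // cannot happen for directories
        else => |e| return e, // the remaining errors coerce to OpenError
    };
}
```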
lib/std/Io/net/test.zig
@@ -81,7 +81,6 @@ test "IPv6 address parse failures" {
try testing.expectEqual(Unresolved.Parsed{ .invalid_byte = 5 }, Unresolved.parse("::123.123.123.123"));
try testing.expectEqual(Unresolved.Parsed.incomplete, Unresolved.parse("1"));
try testing.expectEqual(Unresolved.Parsed.incomplete, Unresolved.parse("ff01::fb%"));
- try testing.expectEqual(Unresolved.Parsed{ .interface_name_oversized = 9 }, Unresolved.parse("ff01::fb%wlp3" ++ "s0" ** @divExact(std.posix.IFNAMESIZE - 4, 2)));
}
test "invalid but parseable IPv6 scope ids" {
lib/std/Io/net.zig
@@ -1027,7 +1027,7 @@ pub const Socket = struct {
/// Underlying platform-defined type which may or may not be
/// interchangeable with a file system file descriptor.
pub const Handle = switch (native_os) {
- .windows => std.windows.ws2_32.SOCKET,
+ .windows => std.os.windows.ws2_32.SOCKET,
else => std.posix.fd_t,
};
lib/std/Io/Threaded.zig
@@ -541,7 +541,7 @@ fn groupAsync(
context_alignment: std.mem.Alignment,
start: *const fn (*Io.Group, context: *const anyopaque) void,
) void {
- if (builtin.single_threaded) return start(context.ptr);
+ if (builtin.single_threaded) return start(group, context.ptr);
const t: *Threaded = @ptrCast(@alignCast(userdata));
const cpu_count = t.cpu_count catch 1;
const gpa = t.allocator;
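Aside: the old single-threaded fast path invoked the callback with only the context pointer, but `start` takes the group pointer as its first parameter, so the fix forwards both. A minimal sketch with simplified types (a bare `Group` struct standing in for `Io.Group`, and the context already type-erased):

```zig
const Group = struct {};

fn callInline(
    group: *Group,
    context: *const anyopaque,
    start: *const fn (*Group, *const anyopaque) void,
) void {
    // The callback's first parameter is the group, so both values must be
    // forwarded; start(context) alone would be a wrong-arity call.
    start(group, context);
}
```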
lib/std/Thread/Futex.zig
@@ -116,7 +116,7 @@ const SingleThreadedImpl = struct {
unreachable; // deadlock detected
};
- std.Thread.sleep(delay);
+ _ = delay;
return error.Timeout;
}
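Aside: this change is behavior-preserving: in a single-threaded build no other thread can ever issue a wake, so sleeping for `delay` only postpones the inevitable `error.Timeout`. A standalone sketch of that reasoning (simplified signature, not the real Futex API):

```zig
fn timedWaitSingleThreaded(timeout_ns: ?u64) error{Timeout}!void {
    const delay = timeout_ns orelse {
        unreachable; // waiting forever with a single thread is a deadlock
    };
    // No other thread exists to wake the waiter, so sleeping cannot change
    // the outcome; report the timeout immediately.
    _ = delay;
    return error.Timeout;
}
```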
lib/std/zig/system/linux.zig
@@ -1,5 +1,7 @@
-const std = @import("std");
const builtin = @import("builtin");
+
+const std = @import("std");
+const Io = std.Io;
const mem = std.mem;
const fs = std.fs;
const fmt = std.fmt;
@@ -344,7 +346,7 @@ fn testParser(
expected_model: *const Target.Cpu.Model,
input: []const u8,
) !void {
- var r: std.Io.Reader = .fixed(input);
+ var r: Io.Reader = .fixed(input);
const result = try parser.parse(arch, &r);
try testing.expectEqual(expected_model, result.?.model);
try testing.expect(expected_model.features.eql(result.?.features));
@@ -357,7 +359,7 @@ fn testParser(
// When all the lines have been analyzed the finalize method is called.
fn CpuinfoParser(comptime impl: anytype) type {
return struct {
- fn parse(arch: Target.Cpu.Arch, reader: *std.Io.Reader) !?Target.Cpu {
+ fn parse(arch: Target.Cpu.Arch, reader: *Io.Reader) !?Target.Cpu {
var obj: impl = .{};
while (try reader.takeDelimiter('\n')) |line| {
const colon_pos = mem.indexOfScalar(u8, line, ':') orelse continue;
@@ -376,14 +378,14 @@ inline fn getAArch64CpuFeature(comptime feat_reg: []const u8) u64 {
);
}
-pub fn detectNativeCpuAndFeatures() ?Target.Cpu {
+pub fn detectNativeCpuAndFeatures(io: Io) ?Target.Cpu {
var file = fs.openFileAbsolute("/proc/cpuinfo", .{}) catch |err| switch (err) {
else => return null,
};
defer file.close();
var buffer: [4096]u8 = undefined; // "flags" lines can get pretty long.
- var file_reader = file.reader(&buffer);
+ var file_reader = file.reader(io, &buffer);
const current_arch = builtin.cpu.arch;
switch (current_arch) {
lib/std/zig/system.zig
@@ -367,10 +367,10 @@ pub fn resolveTargetQuery(io: Io, query: Target.Query) DetectError!Target {
}
var cpu = switch (query.cpu_model) {
- .native => detectNativeCpuAndFeatures(query_cpu_arch, os, query),
+ .native => detectNativeCpuAndFeatures(io, query_cpu_arch, os, query),
.baseline => Target.Cpu.baseline(query_cpu_arch, os),
.determined_by_arch_os => if (query.cpu_arch == null)
- detectNativeCpuAndFeatures(query_cpu_arch, os, query)
+ detectNativeCpuAndFeatures(io, query_cpu_arch, os, query)
else
Target.Cpu.baseline(query_cpu_arch, os),
.explicit => |model| model.toCpu(query_cpu_arch),
@@ -521,7 +521,7 @@ fn updateCpuFeatures(
set.removeFeatureSet(sub_set);
}
-fn detectNativeCpuAndFeatures(cpu_arch: Target.Cpu.Arch, os: Target.Os, query: Target.Query) ?Target.Cpu {
+fn detectNativeCpuAndFeatures(io: Io, cpu_arch: Target.Cpu.Arch, os: Target.Os, query: Target.Query) ?Target.Cpu {
// Here we switch on a comptime value rather than `cpu_arch`. This is valid because `cpu_arch`,
// although it is a runtime value, is guaranteed to be one of the architectures in the set
// of the respective switch prong.
@@ -532,7 +532,7 @@ fn detectNativeCpuAndFeatures(cpu_arch: Target.Cpu.Arch, os: Target.Os, query: T
}
switch (builtin.os.tag) {
- .linux => return linux.detectNativeCpuAndFeatures(),
+ .linux => return linux.detectNativeCpuAndFeatures(io),
.macos => return darwin.macos.detectNativeCpuAndFeatures(),
.windows => return windows.detectNativeCpuAndFeatures(),
else => {},
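Aside: the context kept above explains why the function switches on a comptime value rather than the runtime `cpu_arch`. A minimal sketch of that idiom, with hypothetical prong bodies:

```zig
const builtin = @import("builtin");

fn nativeDetectorName() ?[]const u8 {
    // Because builtin.cpu.arch is comptime-known, prongs for other
    // architectures are never analyzed, so each prong may reference
    // arch-specific code that would not compile elsewhere. The runtime
    // cpu_arch is guaranteed to agree with it inside each prong.
    return switch (builtin.cpu.arch) {
        .x86_64, .x86 => "x86 detection would run here",
        .aarch64 => "aarch64 detection would run here",
        else => null,
    };
}
```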
lib/std/Io.zig
@@ -1282,7 +1282,7 @@ pub const TypeErasedQueue = struct {
var remaining = elements;
while (true) {
- const getter: *Get = @fieldParentPtr("node", q.getters.popFirst() orelse break);
+ const getter: *Get = @alignCast(@fieldParentPtr("node", q.getters.popFirst() orelse break));
const copy_len = @min(getter.remaining.len, remaining.len);
@memcpy(getter.remaining[0..copy_len], remaining[0..copy_len]);
remaining = remaining[copy_len..];
@@ -1379,7 +1379,7 @@ pub const TypeErasedQueue = struct {
}
// Copy directly from putters into buffer.
while (remaining.len > 0) {
- const putter: *Put = @fieldParentPtr("node", q.putters.popFirst() orelse break);
+ const putter: *Put = @alignCast(@fieldParentPtr("node", q.putters.popFirst() orelse break));
const copy_len = @min(putter.remaining.len, remaining.len);
@memcpy(remaining[0..copy_len], putter.remaining[0..copy_len]);
putter.remaining = putter.remaining[copy_len..];
@@ -1412,7 +1412,7 @@ pub const TypeErasedQueue = struct {
/// buffers been fully copied.
fn fillRingBufferFromPutters(q: *TypeErasedQueue, io: Io, len: usize) usize {
while (true) {
- const putter: *Put = @fieldParentPtr("node", q.putters.popFirst() orelse return len);
+ const putter: *Put = @alignCast(@fieldParentPtr("node", q.putters.popFirst() orelse return len));
const available = q.buffer[q.put_index..];
const copy_len = @min(available.len, putter.remaining.len);
@memcpy(available[0..copy_len], putter.remaining[0..copy_len]);
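Aside: the Io.zig hunks wrap `@fieldParentPtr` in `@alignCast` so the recovered Get/Put pointer is asserted to meet its type's alignment. A minimal sketch of the intrusive-node recovery pattern, using a made-up `Node`/`Put` layout rather than the real queue types:

```zig
const std = @import("std");

const Node = struct { next: ?*Node = null };

const Put = struct {
    remaining: []const u8,
    node: Node,
};

test "recover the enclosing Put from its embedded node" {
    var put: Put = .{ .remaining = "abc", .node = .{} };
    const node_ptr: *Node = &put.node;
    // The parent type is inferred from the result location; @alignCast
    // asserts the recovered pointer satisfies *Put's alignment.
    const recovered: *Put = @alignCast(@fieldParentPtr("node", node_ptr));
    try std.testing.expect(recovered == &put);
}
```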