Commit 67e51311c3

Timon Kruiper <Timon Kruiper@LAPTOP-3V2IS54Q>
2020-03-26 13:41:53
Fix behavior test with --test-evented-io on Windows
Also make simple file operations work asynchronously on Windows
1 parent f7f563e
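
At a high level, the commit threads a new enable_async_io parameter from std.fs.File down into windows.ReadFile/WriteFile and gives each File an async_block_allowed escape hatch, so evented builds can use overlapped I/O on Windows while stdio and debug-info reads stay blocking. A minimal sketch of what this enables; the file name is a placeholder and it assumes the root-level io_mode override of this era:

    const std = @import("std");

    pub const io_mode = .evented; // opt the whole program into evented I/O

    pub fn main() !void {
        // After this commit, on Windows the read below is issued as
        // overlapped I/O and completed through the event loop's IOCP,
        // instead of always taking the blocking path.
        const file = try std.fs.cwd().openFile("build.zig", .{});
        defer file.close();

        var buf: [128]u8 = undefined;
        const n = try file.read(&buf);
        std.debug.warn("read {} bytes\n", .{n});
    }
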
lib/std/fs/file.zig
@@ -251,6 +251,10 @@ pub const File = struct {
     pub const PReadError = os.PReadError;
 
     pub fn read(self: File, buffer: []u8) ReadError!usize {
+        if (builtin.os.tag == .windows) {
+            const enable_async_io = std.io.is_async and !self.async_block_allowed;
+            return windows.ReadFile(self.handle, buffer, null, enable_async_io);
+        }
         if (need_async_thread and self.io_mode == .blocking and !self.async_block_allowed) {
             return std.event.Loop.instance.?.read(self.handle, buffer);
         } else {
@@ -271,6 +275,10 @@ pub const File = struct {
     }
 
     pub fn pread(self: File, buffer: []u8, offset: u64) PReadError!usize {
+        if (builtin.os.tag == .windows) {
+            const enable_async_io = std.io.is_async and !self.async_block_allowed;
+            return windows.ReadFile(self.handle, buffer, offset, enable_async_io);
+        }
         if (need_async_thread and self.io_mode == .blocking and !self.async_block_allowed) {
             return std.event.Loop.instance.?.pread(self.handle, buffer, offset);
         } else {
@@ -362,6 +370,10 @@ pub const File = struct {
     pub const PWriteError = os.PWriteError;
 
     pub fn write(self: File, bytes: []const u8) WriteError!usize {
+        if (builtin.os.tag == .windows) {
+            const enable_async_io = std.io.is_async and !self.async_block_allowed;
+            return windows.WriteFile(self.handle, bytes, null, enable_async_io);
+        }
         if (need_async_thread and self.io_mode == .blocking and !self.async_block_allowed) {
             return std.event.Loop.instance.?.write(self.handle, bytes);
         } else {
@@ -377,6 +389,10 @@ pub const File = struct {
     }
 
     pub fn pwrite(self: File, bytes: []const u8, offset: u64) PWriteError!usize {
+        if (builtin.os.tag == .windows) {
+            const enable_async_io = std.io.is_async and !self.async_block_allowed;
+            return windows.WriteFile(self.handle, bytes, offset, enable_async_io);
+        }
         if (need_async_thread and self.io_mode == .blocking and !self.async_block_allowed) {
             return std.event.Loop.instance.?.pwrite(self.handle, bytes, offset);
         } else {
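
All four wrappers above now repeat the same Windows-specific preamble. The helper below is hypothetical (the commit inlines the expression), but it shows the condition in isolation: overlapped I/O is requested only when the build is async and the handle has not opted into always-blocking mode.

    const std = @import("std");

    // Hypothetical helper mirroring the condition inlined in read/pread/
    // write/pwrite; not part of the commit.
    fn wantsAsyncIo(file: std.fs.File) bool {
        return std.io.is_async and !file.async_block_allowed;
    }

    pub fn main() !void {
        const out = std.io.getStdOut();
        std.debug.warn("stdout wants async I/O: {}\n", .{wantsAsyncIo(out)});
    }
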
lib/std/os/windows.zig
@@ -108,6 +108,7 @@ pub fn OpenFileW(
     sa: ?*SECURITY_ATTRIBUTES,
     access_mask: ACCESS_MASK,
     creation: ULONG,
+    enable_async_io: bool,
 ) OpenError!HANDLE {
     if (sub_path_w[0] == '.' and sub_path_w[1] == 0) {
         return error.IsDir;
@@ -135,6 +136,7 @@ pub fn OpenFileW(
         .SecurityQualityOfService = null,
     };
     var io: IO_STATUS_BLOCK = undefined;
+    const blocking_flag = if (!enable_async_io) FILE_SYNCHRONOUS_IO_NONALERT else @as(ULONG, 0);
     const rc = ntdll.NtCreateFile(
         &result,
         access_mask,
@@ -144,7 +146,7 @@ pub fn OpenFileW(
         FILE_ATTRIBUTE_NORMAL,
         FILE_SHARE_WRITE | FILE_SHARE_READ | FILE_SHARE_DELETE,
         creation,
-        FILE_NON_DIRECTORY_FILE | FILE_SYNCHRONOUS_IO_NONALERT,
+        FILE_NON_DIRECTORY_FILE | blocking_flag,
         null,
         0,
     );
@@ -428,10 +430,11 @@ pub const ReadFileError = error{
 
 /// If buffer's length exceeds what a Windows DWORD integer can hold, it will be broken into
 /// multiple non-atomic reads.
-pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64) ReadFileError!usize {
-    if (std.event.Loop.instance) |loop| {
+pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64, enable_async_io: bool) ReadFileError!usize {
+    if (std.event.Loop.instance != null and enable_async_io) {
+        const loop = std.event.Loop.instance.?;
         // TODO support async ReadFile with no offset
-        const off = offset.?;
+        const off = if (offset == null) 0 else offset.?;
         var resume_node = std.event.Loop.ResumeNode.Basic{
             .base = .{
                 .id = .Basic,
@@ -446,20 +449,20 @@ pub fn ReadFile(in_hFile: HANDLE, buffer: []u8, offset: ?u64) ReadFileError!usiz
             },
         };
         // TODO only call create io completion port once per fd
-        _ = windows.CreateIoCompletionPort(fd, loop.os_data.io_port, undefined, undefined) catch undefined;
+        _ = CreateIoCompletionPort(in_hFile, loop.os_data.io_port, undefined, undefined) catch undefined;
         loop.beginOneEvent();
         suspend {
             // TODO handle buffer bigger than DWORD can hold
-            _ = windows.kernel32.ReadFile(fd, buffer.ptr, @intCast(windows.DWORD, buffer.len), null, &resume_node.base.overlapped);
+            _ = kernel32.ReadFile(in_hFile, buffer.ptr, @intCast(DWORD, buffer.len), null, &resume_node.base.overlapped);
         }
-        var bytes_transferred: windows.DWORD = undefined;
-        if (windows.kernel32.GetOverlappedResult(fd, &resume_node.base.overlapped, &bytes_transferred, windows.FALSE) == 0) {
-            switch (windows.kernel32.GetLastError()) {
+        var bytes_transferred: DWORD = undefined;
+        if (kernel32.GetOverlappedResult(in_hFile, &resume_node.base.overlapped, &bytes_transferred, FALSE) == 0) {
+            switch (kernel32.GetLastError()) {
                 .IO_PENDING => unreachable,
                 .OPERATION_ABORTED => return error.OperationAborted,
                 .BROKEN_PIPE => return error.BrokenPipe,
                 .HANDLE_EOF => return @as(usize, bytes_transferred),
-                else => |err| return windows.unexpectedError(err),
+                else => |err| return unexpectedError(err),
             }
         }
         return @as(usize, bytes_transferred);
@@ -501,10 +504,11 @@ pub const WriteFileError = error{
     Unexpected,
 };
 
-pub fn WriteFile(handle: HANDLE, bytes: []const u8, offset: ?u64) WriteFileError!usize {
-    if (std.event.Loop.instance) |loop| {
+pub fn WriteFile(handle: HANDLE, bytes: []const u8, offset: ?u64, enable_async_io: bool) WriteFileError!usize {
+    if (std.event.Loop.instance != null and enable_async_io) {
+        const loop = std.event.Loop.instance.?;
         // TODO support async WriteFile with no offset
-        const off = offset.?;
+        const off = if (offset == null) 0 else offset.?;
         var resume_node = std.event.Loop.ResumeNode.Basic{
             .base = .{
                 .id = .Basic,
@@ -519,14 +523,14 @@ pub fn WriteFile(handle: HANDLE, bytes: []const u8, offset: ?u64) WriteFileError
             },
         };
         // TODO only call create io completion port once per fd
-        _ = CreateIoCompletionPort(fd, loop.os_data.io_port, undefined, undefined);
+        _ = CreateIoCompletionPort(handle, loop.os_data.io_port, undefined, undefined) catch undefined;
         loop.beginOneEvent();
         suspend {
-            const adjusted_len = math.cast(windows.DWORD, bytes.len) catch maxInt(windows.DWORD);
-            _ = kernel32.WriteFile(fd, bytes.ptr, adjusted_len, null, &resume_node.base.overlapped);
+            const adjusted_len = math.cast(DWORD, bytes.len) catch maxInt(DWORD);
+            _ = kernel32.WriteFile(handle, bytes.ptr, adjusted_len, null, &resume_node.base.overlapped);
         }
-        var bytes_transferred: windows.DWORD = undefined;
-        if (kernel32.GetOverlappedResult(fd, &resume_node.base.overlapped, &bytes_transferred, FALSE) == 0) {
+        var bytes_transferred: DWORD = undefined;
+        if (kernel32.GetOverlappedResult(handle, &resume_node.base.overlapped, &bytes_transferred, FALSE) == 0) {
             switch (kernel32.GetLastError()) {
                 .IO_PENDING => unreachable,
                 .INVALID_USER_BUFFER => return error.SystemResources,
@@ -534,7 +538,7 @@ pub fn WriteFile(handle: HANDLE, bytes: []const u8, offset: ?u64) WriteFileError
                 .OPERATION_ABORTED => return error.OperationAborted,
                 .NOT_ENOUGH_QUOTA => return error.SystemResources,
                 .BROKEN_PIPE => return error.BrokenPipe,
-                else => |err| return windows.unexpectedError(err),
+                else => |err| return unexpectedError(err),
             }
         }
         return bytes_transferred;
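
With the extra parameter, one entry point serves both paths: when enable_async_io is false (or there is no event loop instance) the function falls through to the existing synchronous path, otherwise it takes the overlapped/IOCP branch shown above. A sketch of a direct call, assuming a Windows build and a readable handle:

    const std = @import("std");
    const windows = std.os.windows;

    // Passing false for the new fourth argument forces the synchronous path
    // even if an event loop instance exists.
    fn readBlocking(handle: windows.HANDLE, buf: []u8) !usize {
        return windows.ReadFile(handle, buf, null, false);
    }

    pub fn main() !void {
        var buf: [64]u8 = undefined;
        const n = try readBlocking(std.io.getStdIn().handle, buf[0..]);
        std.debug.warn("read {} bytes from stdin\n", .{n});
    }
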
lib/std/debug.zig
@@ -666,158 +666,160 @@ pub fn openSelfDebugInfo(allocator: *mem.Allocator) anyerror!DebugInfo {
 
 /// TODO resources https://github.com/ziglang/zig/issues/4353
 fn openCoffDebugInfo(allocator: *mem.Allocator, coff_file_path: [:0]const u16) !ModuleDebugInfo {
-    const coff_file = try std.fs.openFileAbsoluteW(coff_file_path.ptr, .{});
-    errdefer coff_file.close();
+    noasync {
+        const coff_file = try std.fs.openFileAbsoluteW(coff_file_path.ptr, .{ .always_blocking = true });
+        errdefer coff_file.close();
 
-    const coff_obj = try allocator.create(coff.Coff);
-    coff_obj.* = coff.Coff.init(allocator, coff_file);
+        const coff_obj = try allocator.create(coff.Coff);
+        coff_obj.* = coff.Coff.init(allocator, coff_file);
 
-    var di = ModuleDebugInfo{
-        .base_address = undefined,
-        .coff = coff_obj,
-        .pdb = undefined,
-        .sect_contribs = undefined,
-        .modules = undefined,
-    };
+        var di = ModuleDebugInfo{
+            .base_address = undefined,
+            .coff = coff_obj,
+            .pdb = undefined,
+            .sect_contribs = undefined,
+            .modules = undefined,
+        };
 
-    try di.coff.loadHeader();
+        try di.coff.loadHeader();
 
-    var path_buf: [windows.MAX_PATH]u8 = undefined;
-    const len = try di.coff.getPdbPath(path_buf[0..]);
-    const raw_path = path_buf[0..len];
+        var path_buf: [windows.MAX_PATH]u8 = undefined;
+        const len = try di.coff.getPdbPath(path_buf[0..]);
+        const raw_path = path_buf[0..len];
 
-    const path = try fs.path.resolve(allocator, &[_][]const u8{raw_path});
+        const path = try fs.path.resolve(allocator, &[_][]const u8{raw_path});
 
-    try di.pdb.openFile(di.coff, path);
+        try di.pdb.openFile(di.coff, path);
 
-    var pdb_stream = di.pdb.getStream(pdb.StreamType.Pdb) orelse return error.InvalidDebugInfo;
-    const version = try pdb_stream.inStream().readIntLittle(u32);
-    const signature = try pdb_stream.inStream().readIntLittle(u32);
-    const age = try pdb_stream.inStream().readIntLittle(u32);
-    var guid: [16]u8 = undefined;
-    try pdb_stream.inStream().readNoEof(&guid);
-    if (version != 20000404) // VC70, only value observed by LLVM team
-        return error.UnknownPDBVersion;
-    if (!mem.eql(u8, &di.coff.guid, &guid) or di.coff.age != age)
-        return error.PDBMismatch;
-    // We validated the executable and pdb match.
+        var pdb_stream = di.pdb.getStream(pdb.StreamType.Pdb) orelse return error.InvalidDebugInfo;
+        const version = try pdb_stream.inStream().readIntLittle(u32);
+        const signature = try pdb_stream.inStream().readIntLittle(u32);
+        const age = try pdb_stream.inStream().readIntLittle(u32);
+        var guid: [16]u8 = undefined;
+        try pdb_stream.inStream().readNoEof(&guid);
+        if (version != 20000404) // VC70, only value observed by LLVM team
+            return error.UnknownPDBVersion;
+        if (!mem.eql(u8, &di.coff.guid, &guid) or di.coff.age != age)
+            return error.PDBMismatch;
+        // We validated the executable and pdb match.
 
-    const string_table_index = str_tab_index: {
-        const name_bytes_len = try pdb_stream.inStream().readIntLittle(u32);
-        const name_bytes = try allocator.alloc(u8, name_bytes_len);
-        try pdb_stream.inStream().readNoEof(name_bytes);
+        const string_table_index = str_tab_index: {
+            const name_bytes_len = try pdb_stream.inStream().readIntLittle(u32);
+            const name_bytes = try allocator.alloc(u8, name_bytes_len);
+            try pdb_stream.inStream().readNoEof(name_bytes);
 
-        const HashTableHeader = packed struct {
-            Size: u32,
-            Capacity: u32,
+            const HashTableHeader = packed struct {
+                Size: u32,
+                Capacity: u32,
 
-            fn maxLoad(cap: u32) u32 {
-                return cap * 2 / 3 + 1;
-            }
-        };
-        const hash_tbl_hdr = try pdb_stream.inStream().readStruct(HashTableHeader);
-        if (hash_tbl_hdr.Capacity == 0)
-            return error.InvalidDebugInfo;
+                fn maxLoad(cap: u32) u32 {
+                    return cap * 2 / 3 + 1;
+                }
+            };
+            const hash_tbl_hdr = try pdb_stream.inStream().readStruct(HashTableHeader);
+            if (hash_tbl_hdr.Capacity == 0)
+                return error.InvalidDebugInfo;
 
-        if (hash_tbl_hdr.Size > HashTableHeader.maxLoad(hash_tbl_hdr.Capacity))
-            return error.InvalidDebugInfo;
+            if (hash_tbl_hdr.Size > HashTableHeader.maxLoad(hash_tbl_hdr.Capacity))
+                return error.InvalidDebugInfo;
 
-        const present = try readSparseBitVector(&pdb_stream.inStream(), allocator);
-        if (present.len != hash_tbl_hdr.Size)
-            return error.InvalidDebugInfo;
-        const deleted = try readSparseBitVector(&pdb_stream.inStream(), allocator);
+            const present = try readSparseBitVector(&pdb_stream.inStream(), allocator);
+            if (present.len != hash_tbl_hdr.Size)
+                return error.InvalidDebugInfo;
+            const deleted = try readSparseBitVector(&pdb_stream.inStream(), allocator);
 
-        const Bucket = struct {
-            first: u32,
-            second: u32,
-        };
-        const bucket_list = try allocator.alloc(Bucket, present.len);
-        for (present) |_| {
-            const name_offset = try pdb_stream.inStream().readIntLittle(u32);
-            const name_index = try pdb_stream.inStream().readIntLittle(u32);
-            const name = mem.toSlice(u8, @ptrCast([*:0]u8, name_bytes.ptr + name_offset));
-            if (mem.eql(u8, name, "/names")) {
-                break :str_tab_index name_index;
+            const Bucket = struct {
+                first: u32,
+                second: u32,
+            };
+            const bucket_list = try allocator.alloc(Bucket, present.len);
+            for (present) |_| {
+                const name_offset = try pdb_stream.inStream().readIntLittle(u32);
+                const name_index = try pdb_stream.inStream().readIntLittle(u32);
+                const name = mem.toSlice(u8, @ptrCast([*:0]u8, name_bytes.ptr + name_offset));
+                if (mem.eql(u8, name, "/names")) {
+                    break :str_tab_index name_index;
+                }
             }
-        }
-        return error.MissingDebugInfo;
-    };
+            return error.MissingDebugInfo;
+        };
 
-    di.pdb.string_table = di.pdb.getStreamById(string_table_index) orelse return error.MissingDebugInfo;
-    di.pdb.dbi = di.pdb.getStream(pdb.StreamType.Dbi) orelse return error.MissingDebugInfo;
+        di.pdb.string_table = di.pdb.getStreamById(string_table_index) orelse return error.MissingDebugInfo;
+        di.pdb.dbi = di.pdb.getStream(pdb.StreamType.Dbi) orelse return error.MissingDebugInfo;
 
-    const dbi = di.pdb.dbi;
+        const dbi = di.pdb.dbi;
 
-    // Dbi Header
-    const dbi_stream_header = try dbi.inStream().readStruct(pdb.DbiStreamHeader);
-    if (dbi_stream_header.VersionHeader != 19990903) // V70, only value observed by LLVM team
-        return error.UnknownPDBVersion;
-    if (dbi_stream_header.Age != age)
-        return error.UnmatchingPDB;
+        // Dbi Header
+        const dbi_stream_header = try dbi.inStream().readStruct(pdb.DbiStreamHeader);
+        if (dbi_stream_header.VersionHeader != 19990903) // V70, only value observed by LLVM team
+            return error.UnknownPDBVersion;
+        if (dbi_stream_header.Age != age)
+            return error.UnmatchingPDB;
 
-    const mod_info_size = dbi_stream_header.ModInfoSize;
-    const section_contrib_size = dbi_stream_header.SectionContributionSize;
+        const mod_info_size = dbi_stream_header.ModInfoSize;
+        const section_contrib_size = dbi_stream_header.SectionContributionSize;
 
-    var modules = ArrayList(Module).init(allocator);
+        var modules = ArrayList(Module).init(allocator);
 
-    // Module Info Substream
-    var mod_info_offset: usize = 0;
-    while (mod_info_offset != mod_info_size) {
-        const mod_info = try dbi.inStream().readStruct(pdb.ModInfo);
-        var this_record_len: usize = @sizeOf(pdb.ModInfo);
+        // Module Info Substream
+        var mod_info_offset: usize = 0;
+        while (mod_info_offset != mod_info_size) {
+            const mod_info = try dbi.inStream().readStruct(pdb.ModInfo);
+            var this_record_len: usize = @sizeOf(pdb.ModInfo);
 
-        const module_name = try dbi.readNullTermString(allocator);
-        this_record_len += module_name.len + 1;
+            const module_name = try dbi.readNullTermString(allocator);
+            this_record_len += module_name.len + 1;
 
-        const obj_file_name = try dbi.readNullTermString(allocator);
-        this_record_len += obj_file_name.len + 1;
+            const obj_file_name = try dbi.readNullTermString(allocator);
+            this_record_len += obj_file_name.len + 1;
 
-        if (this_record_len % 4 != 0) {
-            const round_to_next_4 = (this_record_len | 0x3) + 1;
-            const march_forward_bytes = round_to_next_4 - this_record_len;
-            try dbi.seekBy(@intCast(isize, march_forward_bytes));
-            this_record_len += march_forward_bytes;
-        }
+            if (this_record_len % 4 != 0) {
+                const round_to_next_4 = (this_record_len | 0x3) + 1;
+                const march_forward_bytes = round_to_next_4 - this_record_len;
+                try dbi.seekBy(@intCast(isize, march_forward_bytes));
+                this_record_len += march_forward_bytes;
+            }
 
-        try modules.append(Module{
-            .mod_info = mod_info,
-            .module_name = module_name,
-            .obj_file_name = obj_file_name,
+            try modules.append(Module{
+                .mod_info = mod_info,
+                .module_name = module_name,
+                .obj_file_name = obj_file_name,
 
-            .populated = false,
-            .symbols = undefined,
-            .subsect_info = undefined,
-            .checksum_offset = null,
-        });
+                .populated = false,
+                .symbols = undefined,
+                .subsect_info = undefined,
+                .checksum_offset = null,
+            });
 
-        mod_info_offset += this_record_len;
-        if (mod_info_offset > mod_info_size)
-            return error.InvalidDebugInfo;
-    }
+            mod_info_offset += this_record_len;
+            if (mod_info_offset > mod_info_size)
+                return error.InvalidDebugInfo;
+        }
 
-    di.modules = modules.toOwnedSlice();
+        di.modules = modules.toOwnedSlice();
 
-    // Section Contribution Substream
-    var sect_contribs = ArrayList(pdb.SectionContribEntry).init(allocator);
-    var sect_cont_offset: usize = 0;
-    if (section_contrib_size != 0) {
-        const ver = @intToEnum(pdb.SectionContrSubstreamVersion, try dbi.inStream().readIntLittle(u32));
-        if (ver != pdb.SectionContrSubstreamVersion.Ver60)
-            return error.InvalidDebugInfo;
-        sect_cont_offset += @sizeOf(u32);
-    }
-    while (sect_cont_offset != section_contrib_size) {
-        const entry = try sect_contribs.addOne();
-        entry.* = try dbi.inStream().readStruct(pdb.SectionContribEntry);
-        sect_cont_offset += @sizeOf(pdb.SectionContribEntry);
+        // Section Contribution Substream
+        var sect_contribs = ArrayList(pdb.SectionContribEntry).init(allocator);
+        var sect_cont_offset: usize = 0;
+        if (section_contrib_size != 0) {
+            const ver = @intToEnum(pdb.SectionContrSubstreamVersion, try dbi.inStream().readIntLittle(u32));
+            if (ver != pdb.SectionContrSubstreamVersion.Ver60)
+                return error.InvalidDebugInfo;
+            sect_cont_offset += @sizeOf(u32);
+        }
+        while (sect_cont_offset != section_contrib_size) {
+            const entry = try sect_contribs.addOne();
+            entry.* = try dbi.inStream().readStruct(pdb.SectionContribEntry);
+            sect_cont_offset += @sizeOf(pdb.SectionContribEntry);
 
-        if (sect_cont_offset > section_contrib_size)
-            return error.InvalidDebugInfo;
-    }
+            if (sect_cont_offset > section_contrib_size)
+                return error.InvalidDebugInfo;
+        }
 
-    di.sect_contribs = sect_contribs.toOwnedSlice();
+        di.sect_contribs = sect_contribs.toOwnedSlice();
 
-    return di;
+        return di;
+    }
 }
 
 fn readSparseBitVector(stream: var, allocator: *mem.Allocator) ![]usize {
@@ -1476,151 +1478,153 @@ pub const ModuleDebugInfo = switch (builtin.os.tag) {
         }
 
         fn getSymbolAtAddress(self: *@This(), address: usize) !SymbolInfo {
-            // Translate the VA into an address into this object
-            const relocated_address = address - self.base_address;
+            noasync {
+                // Translate the VA into an address into this object
+                const relocated_address = address - self.base_address;
+
+                var coff_section: *coff.Section = undefined;
+                const mod_index = for (self.sect_contribs) |sect_contrib| {
+                    if (sect_contrib.Section > self.coff.sections.len) continue;
+                    // Remember that SectionContribEntry.Section is 1-based.
+                    coff_section = &self.coff.sections.toSlice()[sect_contrib.Section - 1];
+
+                    const vaddr_start = coff_section.header.virtual_address + sect_contrib.Offset;
+                    const vaddr_end = vaddr_start + sect_contrib.Size;
+                    if (relocated_address >= vaddr_start and relocated_address < vaddr_end) {
+                        break sect_contrib.ModuleIndex;
+                    }
+                } else {
+                    // we have no information to add to the address
+                    return SymbolInfo{};
+                };
 
-            var coff_section: *coff.Section = undefined;
-            const mod_index = for (self.sect_contribs) |sect_contrib| {
-                if (sect_contrib.Section > self.coff.sections.len) continue;
-                // Remember that SectionContribEntry.Section is 1-based.
-                coff_section = &self.coff.sections.toSlice()[sect_contrib.Section - 1];
+                const mod = &self.modules[mod_index];
+                try populateModule(self, mod);
+                const obj_basename = fs.path.basename(mod.obj_file_name);
 
-                const vaddr_start = coff_section.header.virtual_address + sect_contrib.Offset;
-                const vaddr_end = vaddr_start + sect_contrib.Size;
-                if (relocated_address >= vaddr_start and relocated_address < vaddr_end) {
-                    break sect_contrib.ModuleIndex;
-                }
-            } else {
-                // we have no information to add to the address
-                return SymbolInfo{};
-            };
+                var symbol_i: usize = 0;
+                const symbol_name = if (!mod.populated) "???" else while (symbol_i != mod.symbols.len) {
+                    const prefix = @ptrCast(*pdb.RecordPrefix, &mod.symbols[symbol_i]);
+                    if (prefix.RecordLen < 2)
+                        return error.InvalidDebugInfo;
+                    switch (prefix.RecordKind) {
+                        .S_LPROC32, .S_GPROC32 => {
+                            const proc_sym = @ptrCast(*pdb.ProcSym, &mod.symbols[symbol_i + @sizeOf(pdb.RecordPrefix)]);
+                            const vaddr_start = coff_section.header.virtual_address + proc_sym.CodeOffset;
+                            const vaddr_end = vaddr_start + proc_sym.CodeSize;
+                            if (relocated_address >= vaddr_start and relocated_address < vaddr_end) {
+                                break mem.toSliceConst(u8, @ptrCast([*:0]u8, proc_sym) + @sizeOf(pdb.ProcSym));
+                            }
+                        },
+                        else => {},
+                    }
+                    symbol_i += prefix.RecordLen + @sizeOf(u16);
+                    if (symbol_i > mod.symbols.len)
+                        return error.InvalidDebugInfo;
+                } else "???";
+
+                const subsect_info = mod.subsect_info;
+
+                var sect_offset: usize = 0;
+                var skip_len: usize = undefined;
+                const opt_line_info = subsections: {
+                    const checksum_offset = mod.checksum_offset orelse break :subsections null;
+                    while (sect_offset != subsect_info.len) : (sect_offset += skip_len) {
+                        const subsect_hdr = @ptrCast(*pdb.DebugSubsectionHeader, &subsect_info[sect_offset]);
+                        skip_len = subsect_hdr.Length;
+                        sect_offset += @sizeOf(pdb.DebugSubsectionHeader);
+
+                        switch (subsect_hdr.Kind) {
+                            .Lines => {
+                                var line_index = sect_offset;
+
+                                const line_hdr = @ptrCast(*pdb.LineFragmentHeader, &subsect_info[line_index]);
+                                if (line_hdr.RelocSegment == 0)
+                                    return error.MissingDebugInfo;
+                                line_index += @sizeOf(pdb.LineFragmentHeader);
+                                const frag_vaddr_start = coff_section.header.virtual_address + line_hdr.RelocOffset;
+                                const frag_vaddr_end = frag_vaddr_start + line_hdr.CodeSize;
+
+                                if (relocated_address >= frag_vaddr_start and relocated_address < frag_vaddr_end) {
+                                    // There is an unknown number of LineBlockFragmentHeaders (and their accompanying line and column records)
+                                    // from now on. We will iterate through them, and eventually find a LineInfo that we're interested in,
+                                    // breaking out to :subsections. If not, we will make sure to not read anything outside of this subsection.
+                                    const subsection_end_index = sect_offset + subsect_hdr.Length;
+
+                                    while (line_index < subsection_end_index) {
+                                        const block_hdr = @ptrCast(*pdb.LineBlockFragmentHeader, &subsect_info[line_index]);
+                                        line_index += @sizeOf(pdb.LineBlockFragmentHeader);
+                                        const start_line_index = line_index;
+
+                                        const has_column = line_hdr.Flags.LF_HaveColumns;
+
+                                        // All line entries are stored inside their line block by ascending start address.
+                                        // Heuristic: we want to find the last line entry
+                                        // that has a vaddr_start <= relocated_address.
+                                        // This is done with a simple linear search.
+                                        var line_i: u32 = 0;
+                                        while (line_i < block_hdr.NumLines) : (line_i += 1) {
+                                            const line_num_entry = @ptrCast(*pdb.LineNumberEntry, &subsect_info[line_index]);
+                                            line_index += @sizeOf(pdb.LineNumberEntry);
+
+                                            const vaddr_start = frag_vaddr_start + line_num_entry.Offset;
+                                            if (relocated_address < vaddr_start) {
+                                                break;
+                                            }
+                                        }
 
-            const mod = &self.modules[mod_index];
-            try populateModule(self, mod);
-            const obj_basename = fs.path.basename(mod.obj_file_name);
-
-            var symbol_i: usize = 0;
-            const symbol_name = if (!mod.populated) "???" else while (symbol_i != mod.symbols.len) {
-                const prefix = @ptrCast(*pdb.RecordPrefix, &mod.symbols[symbol_i]);
-                if (prefix.RecordLen < 2)
-                    return error.InvalidDebugInfo;
-                switch (prefix.RecordKind) {
-                    .S_LPROC32, .S_GPROC32 => {
-                        const proc_sym = @ptrCast(*pdb.ProcSym, &mod.symbols[symbol_i + @sizeOf(pdb.RecordPrefix)]);
-                        const vaddr_start = coff_section.header.virtual_address + proc_sym.CodeOffset;
-                        const vaddr_end = vaddr_start + proc_sym.CodeSize;
-                        if (relocated_address >= vaddr_start and relocated_address < vaddr_end) {
-                            break mem.toSliceConst(u8, @ptrCast([*:0]u8, proc_sym) + @sizeOf(pdb.ProcSym));
-                        }
-                    },
-                    else => {},
-                }
-                symbol_i += prefix.RecordLen + @sizeOf(u16);
-                if (symbol_i > mod.symbols.len)
-                    return error.InvalidDebugInfo;
-            } else "???";
-
-            const subsect_info = mod.subsect_info;
-
-            var sect_offset: usize = 0;
-            var skip_len: usize = undefined;
-            const opt_line_info = subsections: {
-                const checksum_offset = mod.checksum_offset orelse break :subsections null;
-                while (sect_offset != subsect_info.len) : (sect_offset += skip_len) {
-                    const subsect_hdr = @ptrCast(*pdb.DebugSubsectionHeader, &subsect_info[sect_offset]);
-                    skip_len = subsect_hdr.Length;
-                    sect_offset += @sizeOf(pdb.DebugSubsectionHeader);
-
-                    switch (subsect_hdr.Kind) {
-                        .Lines => {
-                            var line_index = sect_offset;
-
-                            const line_hdr = @ptrCast(*pdb.LineFragmentHeader, &subsect_info[line_index]);
-                            if (line_hdr.RelocSegment == 0)
-                                return error.MissingDebugInfo;
-                            line_index += @sizeOf(pdb.LineFragmentHeader);
-                            const frag_vaddr_start = coff_section.header.virtual_address + line_hdr.RelocOffset;
-                            const frag_vaddr_end = frag_vaddr_start + line_hdr.CodeSize;
-
-                            if (relocated_address >= frag_vaddr_start and relocated_address < frag_vaddr_end) {
-                                // There is an unknown number of LineBlockFragmentHeaders (and their accompanying line and column records)
-                                // from now on. We will iterate through them, and eventually find a LineInfo that we're interested in,
-                                // breaking out to :subsections. If not, we will make sure to not read anything outside of this subsection.
-                                const subsection_end_index = sect_offset + subsect_hdr.Length;
-
-                                while (line_index < subsection_end_index) {
-                                    const block_hdr = @ptrCast(*pdb.LineBlockFragmentHeader, &subsect_info[line_index]);
-                                    line_index += @sizeOf(pdb.LineBlockFragmentHeader);
-                                    const start_line_index = line_index;
-
-                                    const has_column = line_hdr.Flags.LF_HaveColumns;
-
-                                    // All line entries are stored inside their line block by ascending start address.
-                                    // Heuristic: we want to find the last line entry
-                                    // that has a vaddr_start <= relocated_address.
-                                    // This is done with a simple linear search.
-                                    var line_i: u32 = 0;
-                                    while (line_i < block_hdr.NumLines) : (line_i += 1) {
-                                        const line_num_entry = @ptrCast(*pdb.LineNumberEntry, &subsect_info[line_index]);
-                                        line_index += @sizeOf(pdb.LineNumberEntry);
-
-                                        const vaddr_start = frag_vaddr_start + line_num_entry.Offset;
-                                        if (relocated_address < vaddr_start) {
-                                            break;
+                                        // line_i == 0 would mean that no matching LineNumberEntry was found.
+                                        if (line_i > 0) {
+                                            const subsect_index = checksum_offset + block_hdr.NameIndex;
+                                            const chksum_hdr = @ptrCast(*pdb.FileChecksumEntryHeader, &mod.subsect_info[subsect_index]);
+                                            const strtab_offset = @sizeOf(pdb.PDBStringTableHeader) + chksum_hdr.FileNameOffset;
+                                            try self.pdb.string_table.seekTo(strtab_offset);
+                                            const source_file_name = try self.pdb.string_table.readNullTermString(self.allocator());
+
+                                            const line_entry_idx = line_i - 1;
+
+                                            const column = if (has_column) blk: {
+                                                const start_col_index = start_line_index + @sizeOf(pdb.LineNumberEntry) * block_hdr.NumLines;
+                                                const col_index = start_col_index + @sizeOf(pdb.ColumnNumberEntry) * line_entry_idx;
+                                                const col_num_entry = @ptrCast(*pdb.ColumnNumberEntry, &subsect_info[col_index]);
+                                                break :blk col_num_entry.StartColumn;
+                                            } else 0;
+
+                                            const found_line_index = start_line_index + line_entry_idx * @sizeOf(pdb.LineNumberEntry);
+                                            const line_num_entry = @ptrCast(*pdb.LineNumberEntry, &subsect_info[found_line_index]);
+                                            const flags = @ptrCast(*pdb.LineNumberEntry.Flags, &line_num_entry.Flags);
+
+                                            break :subsections LineInfo{
+                                                .allocator = self.allocator(),
+                                                .file_name = source_file_name,
+                                                .line = flags.Start,
+                                                .column = column,
+                                            };
                                         }
                                     }
 
-                                    // line_i == 0 would mean that no matching LineNumberEntry was found.
-                                    if (line_i > 0) {
-                                        const subsect_index = checksum_offset + block_hdr.NameIndex;
-                                        const chksum_hdr = @ptrCast(*pdb.FileChecksumEntryHeader, &mod.subsect_info[subsect_index]);
-                                        const strtab_offset = @sizeOf(pdb.PDBStringTableHeader) + chksum_hdr.FileNameOffset;
-                                        try self.pdb.string_table.seekTo(strtab_offset);
-                                        const source_file_name = try self.pdb.string_table.readNullTermString(self.allocator());
-
-                                        const line_entry_idx = line_i - 1;
-
-                                        const column = if (has_column) blk: {
-                                            const start_col_index = start_line_index + @sizeOf(pdb.LineNumberEntry) * block_hdr.NumLines;
-                                            const col_index = start_col_index + @sizeOf(pdb.ColumnNumberEntry) * line_entry_idx;
-                                            const col_num_entry = @ptrCast(*pdb.ColumnNumberEntry, &subsect_info[col_index]);
-                                            break :blk col_num_entry.StartColumn;
-                                        } else 0;
-
-                                        const found_line_index = start_line_index + line_entry_idx * @sizeOf(pdb.LineNumberEntry);
-                                        const line_num_entry = @ptrCast(*pdb.LineNumberEntry, &subsect_info[found_line_index]);
-                                        const flags = @ptrCast(*pdb.LineNumberEntry.Flags, &line_num_entry.Flags);
-
-                                        break :subsections LineInfo{
-                                            .allocator = self.allocator(),
-                                            .file_name = source_file_name,
-                                            .line = flags.Start,
-                                            .column = column,
-                                        };
+                                    // Checking that we are not reading garbage after the (possibly) multiple block fragments.
+                                    if (line_index != subsection_end_index) {
+                                        return error.InvalidDebugInfo;
                                     }
                                 }
+                            },
+                            else => {},
+                        }
 
-                                // Checking that we are not reading garbage after the (possibly) multiple block fragments.
-                                if (line_index != subsection_end_index) {
-                                    return error.InvalidDebugInfo;
-                                }
-                            }
-                        },
-                        else => {},
+                        if (sect_offset > subsect_info.len)
+                            return error.InvalidDebugInfo;
+                    } else {
+                        break :subsections null;
                     }
+                };
 
-                    if (sect_offset > subsect_info.len)
-                        return error.InvalidDebugInfo;
-                } else {
-                    break :subsections null;
-                }
-            };
-
-            return SymbolInfo{
-                .symbol_name = symbol_name,
-                .compile_unit_name = obj_basename,
-                .line_info = opt_line_info,
-            };
+                return SymbolInfo{
+                    .symbol_name = symbol_name,
+                    .compile_unit_name = obj_basename,
+                    .line_info = opt_line_info,
+                };
+            }
         }
     },
     .linux, .netbsd, .freebsd, .dragonfly => struct {
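
The debug-info code is now wrapped in noasync blocks and opens its files with .always_blocking = true, so it never suspends into the event loop, presumably so stack traces can be printed without relying on it. A compressed sketch of that pattern, with a placeholder path and a single non-looping read, not taken from the commit:

    const std = @import("std");

    // Blocking-only read inside a noasync block, mirroring how debug.zig
    // (and pdb.zig below) now open their files.
    fn readDebugBlob(allocator: *std.mem.Allocator, path: []const u8) ![]u8 {
        noasync {
            const file = try std.fs.cwd().openFile(path, .{ .always_blocking = true });
            defer file.close();
            const size = try file.getEndPos();
            const buf = try allocator.alloc(u8, @intCast(usize, size));
            errdefer allocator.free(buf);
            // One read is enough for a sketch; real code would loop until
            // the buffer is full.
            _ = try file.read(buf);
            return buf;
        }
    }

    pub fn main() !void {
        // Placeholder path for illustration only.
        const blob = try readDebugBlob(std.heap.page_allocator, "zig-cache/sample.pdb");
        defer std.heap.page_allocator.free(blob);
        std.debug.warn("{} bytes of debug data\n", .{blob.len});
    }
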
lib/std/fs.zig
@@ -613,9 +613,14 @@ pub const Dir = struct {
         const access_mask = w.SYNCHRONIZE |
             (if (flags.read) @as(u32, w.GENERIC_READ) else 0) |
             (if (flags.write) @as(u32, w.GENERIC_WRITE) else 0);
+        const enable_async_io = std.io.is_async and !flags.always_blocking;
         return @as(File, .{
-            .handle = try os.windows.OpenFileW(self.fd, sub_path_w, null, access_mask, w.FILE_OPEN),
+            .handle = try os.windows.OpenFileW(self.fd, sub_path_w, null, access_mask, w.FILE_OPEN, enable_async_io),
             .io_mode = .blocking,
+            .async_block_allowed = if (flags.always_blocking)
+                File.async_block_allowed_yes
+            else
+                File.async_block_allowed_no,
         });
     }
 
@@ -662,7 +667,7 @@ pub const Dir = struct {
         else
             @as(u32, w.FILE_OPEN_IF);
         return @as(File, .{
-            .handle = try os.windows.OpenFileW(self.fd, sub_path_w, null, access_mask, creation),
+            .handle = try os.windows.OpenFileW(self.fd, sub_path_w, null, access_mask, creation, std.io.is_async),
             .io_mode = .blocking,
         });
     }
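
openFileW now decides per handle whether overlapped I/O is even possible: FILE_SYNCHRONOUS_IO_NONALERT is dropped only when the build is async and the caller did not ask for .always_blocking, and the returned File records that choice in async_block_allowed. A sketch of the two open modes (placeholder path):

    const std = @import("std");

    pub fn main() !void {
        // Default flags: in an evented build this handle may go through
        // the event loop on Windows.
        const evented_capable = try std.fs.cwd().openFile("build.zig", .{});
        defer evented_capable.close();

        // .always_blocking opts out: the handle is opened synchronously
        // (FILE_SYNCHRONOUS_IO_NONALERT) and its reads/writes skip the
        // event loop even when std.io.is_async is true.
        const forced_blocking = try std.fs.cwd().openFile("build.zig", .{ .always_blocking = true });
        defer forced_blocking.close();
    }
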
lib/std/io.zig
@@ -42,10 +42,12 @@ fn getStdOutHandle() os.fd_t {
     return os.STDOUT_FILENO;
 }
 
+// TODO: async stdout on windows (https://github.com/ziglang/zig/pull/4816#issuecomment-604521023)
 pub fn getStdOut() File {
     return File{
         .handle = getStdOutHandle(),
         .io_mode = .blocking,
+        .async_block_allowed = if (builtin.os.tag == .windows) File.async_block_allowed_yes else File.async_block_allowed_no,
     };
 }
 
@@ -81,10 +83,12 @@ fn getStdInHandle() os.fd_t {
     return os.STDIN_FILENO;
 }
 
+// TODO: async stdin on windows (https://github.com/ziglang/zig/pull/4816#issuecomment-604521023)
 pub fn getStdIn() File {
     return File{
         .handle = getStdInHandle(),
         .io_mode = .blocking,
+        .async_block_allowed = if (builtin.os.tag == .windows) File.async_block_allowed_yes else File.async_block_allowed_no,
     };
 }
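
Async console I/O is not implemented yet (see the linked TODO), so on Windows the std handles are explicitly marked async_block_allowed and writes to them take the blocking path even in an evented build.

    const std = @import("std");

    pub fn main() !void {
        // On Windows this write stays blocking even under evented io_mode,
        // because getStdOut() sets async_block_allowed for the handle.
        const out = std.io.getStdOut();
        _ = try out.write("hello from a possibly-async build\n");
    }
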
 
lib/std/os.zig
@@ -305,7 +305,7 @@ pub const ReadError = error{
 /// For POSIX the limit is `math.maxInt(isize)`.
 pub fn read(fd: fd_t, buf: []u8) ReadError!usize {
     if (builtin.os.tag == .windows) {
-        return windows.ReadFile(fd, buf, null);
+        return windows.ReadFile(fd, buf, null, false);
     }
 
     if (builtin.os.tag == .wasi and !builtin.link_libc) {
@@ -407,7 +407,7 @@ pub const PReadError = ReadError || error{Unseekable};
 /// used to perform the I/O. `error.WouldBlock` is not possible on Windows.
 pub fn pread(fd: fd_t, buf: []u8, offset: u64) PReadError!usize {
     if (builtin.os.tag == .windows) {
-        return windows.ReadFile(fd, buf, offset);
+        return windows.ReadFile(fd, buf, offset, false);
     }
 
     while (true) {
@@ -583,7 +583,7 @@ pub const WriteError = error{
 /// The corresponding POSIX limit is `math.maxInt(isize)`.
 pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize {
     if (builtin.os.tag == .windows) {
-        return windows.WriteFile(fd, bytes, null);
+        return windows.WriteFile(fd, bytes, null, false);
     }
 
     if (builtin.os.tag == .wasi and !builtin.link_libc) {
@@ -708,7 +708,7 @@ pub const PWriteError = WriteError || error{Unseekable};
 /// The corresponding POSIX limit is `math.maxInt(isize)`.
 pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) PWriteError!usize {
     if (std.Target.current.os.tag == .windows) {
-        return windows.WriteFile(fd, bytes, offset);
+        return windows.WriteFile(fd, bytes, offset, false);
     }
 
     // Prevent EINVAL.
@@ -1651,7 +1651,7 @@ pub fn renameatW(
     ReplaceIfExists: windows.BOOLEAN,
 ) RenameError!void {
     const access_mask = windows.SYNCHRONIZE | windows.GENERIC_WRITE | windows.DELETE;
-    const src_fd = try windows.OpenFileW(old_dir_fd, old_path, null, access_mask, windows.FILE_OPEN);
+    const src_fd = try windows.OpenFileW(old_dir_fd, old_path, null, access_mask, windows.FILE_OPEN, false);
     defer windows.CloseHandle(src_fd);
 
     const struct_buf_len = @sizeOf(windows.FILE_RENAME_INFORMATION) + (MAX_PATH_BYTES - 1);
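
The low-level std.os wrappers always pass false for enable_async_io, so that layer stays strictly blocking; evented behaviour is opted into one layer up, in std.fs.File. A small cross-platform sketch (placeholder file name):

    const std = @import("std");

    pub fn main() !void {
        const file = try std.fs.cwd().openFile("build.zig", .{});
        defer file.close();

        var buf: [32]u8 = undefined;
        // std.os.read passes enable_async_io = false on Windows, so this
        // goes straight to the blocking system call.
        const n = try std.os.read(file.handle, &buf);
        std.debug.warn("{} bytes via std.os.read\n", .{n});
    }
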
lib/std/pdb.zig
@@ -470,7 +470,7 @@ pub const Pdb = struct {
     msf: Msf,
 
     pub fn openFile(self: *Pdb, coff_ptr: *coff.Coff, file_name: []u8) !void {
-        self.in_file = try fs.cwd().openFile(file_name, .{});
+        self.in_file = try fs.cwd().openFile(file_name, .{ .always_blocking = true });
         self.allocator = coff_ptr.allocator;
         self.coff = coff_ptr;