const builtin = @import("builtin");
const std = @import("../std.zig");
const Step = std.Build.Step;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const fatal = std.process.fatal;
const Watch = @This();
const FsEvents = @import("Watch/FsEvents.zig");

os: Os,
/// The number of directories being watched, for display to the user.
dir_count: usize,
// These fields are common to most implementations so are kept here for simplicity.
// They are `undefined` on implementations which do not utilize them.
dir_table: DirTable,
generation: Generation,

pub const have_impl = Os != void;

/// Key is the directory to watch which contains one or more files we are
/// interested in noticing changes to.
///
/// Value is generation.
const DirTable = std.ArrayHashMapUnmanaged(Cache.Path, void, Cache.Path.TableAdapter, false);

/// Special key of "." means any changes in this directory trigger the steps.
const ReactionSet = std.StringArrayHashMapUnmanaged(StepSet);
const StepSet = std.AutoArrayHashMapUnmanaged(*Step, Generation);

const Generation = u8;

const Hash = std.hash.Wyhash;
const Cache = std.Build.Cache;

const Os = switch (builtin.os.tag) {
    .linux => struct {
        const posix = std.posix;

        /// Keyed differently but indexes correspond 1:1 with `dir_table`.
        handle_table: HandleTable,
        /// fanotify file descriptors are keyed by mount id since marks
        /// are limited to a single filesystem.
        poll_fds: std.AutoArrayHashMapUnmanaged(MountId, posix.pollfd),

        const MountId = i32;
        const HandleTable = std.ArrayHashMapUnmanaged(FileHandle, struct { mount_id: MountId, reaction_set: ReactionSet }, FileHandle.Adapter, false);

        const fan_mask: std.os.linux.fanotify.MarkMask = .{
            .CLOSE_WRITE = true,
            .CREATE = true,
            .DELETE = true,
            .DELETE_SELF = true,
            .EVENT_ON_CHILD = true,
            .MOVED_FROM = true,
            .MOVED_TO = true,
            .MOVE_SELF = true,
            .ONDIR = true,
        };

        const FileHandle = struct {
            handle: *align(1) std.os.linux.file_handle,

            fn clone(lfh: FileHandle, gpa: Allocator) Allocator.Error!FileHandle {
                const bytes = lfh.slice();
                const new_ptr = try gpa.alignedAlloc(
                    u8,
                    .of(std.os.linux.file_handle),
                    @sizeOf(std.os.linux.file_handle) + bytes.len,
                );
                const new_header: *std.os.linux.file_handle = @ptrCast(new_ptr);
                new_header.* = lfh.handle.*;
                const new: FileHandle = .{ .handle = new_header };
                @memcpy(new.slice(), lfh.slice());
                return new;
            }

            fn destroy(lfh: FileHandle, gpa: Allocator) void {
                const ptr: [*]u8 = @ptrCast(lfh.handle);
                const allocated_slice = ptr[0 .. @sizeOf(std.os.linux.file_handle) + lfh.handle.handle_bytes];
                return gpa.free(allocated_slice);
            }

            fn slice(lfh: FileHandle) []u8 {
                const ptr: [*]u8 = &lfh.handle.f_handle;
                return ptr[0..lfh.handle.handle_bytes];
            }

            const Adapter = struct {
                pub fn hash(self: Adapter, a: FileHandle) u32 {
                    _ = self;
                    const unsigned_type: u32 = @bitCast(a.handle.handle_type);
                    return @truncate(Hash.hash(unsigned_type, a.slice()));
                }
                pub fn eql(self: Adapter, a: FileHandle, b: FileHandle, b_index: usize) bool {
                    _ = self;
                    _ = b_index;
                    return a.handle.handle_type == b.handle.handle_type and std.mem.eql(u8, a.slice(), b.slice());
                }
            };
        };

        fn init() !Watch {
            return .{
                .dir_table = .{},
                .dir_count = 0,
                .os = switch (builtin.os.tag) {
                    .linux => .{
                        .handle_table = .{},
                        .poll_fds = .{},
                    },
                    else => {},
                },
                .generation = 0,
            };
        }

        fn getDirHandle(gpa: Allocator, path: std.Build.Cache.Path, mount_id: *MountId) !FileHandle {
            var file_handle_buffer: [@sizeOf(std.os.linux.file_handle) + 128]u8 align(@alignOf(std.os.linux.file_handle)) = undefined;
            var buf: [std.fs.max_path_bytes]u8 = undefined;
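            // Note: a trailing slash is appended (and an empty sub path becomes "./"),
            // presumably so the path always resolves as a directory in name_to_handle_at below.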
            const adjusted_path = if (path.sub_path.len == 0) "./" else std.fmt.bufPrint(&buf, "{s}/", .{
                path.sub_path,
            }) catch return error.NameTooLong;
            const stack_ptr: *std.os.linux.file_handle = @ptrCast(&file_handle_buffer);
            stack_ptr.handle_bytes = file_handle_buffer.len - @sizeOf(std.os.linux.file_handle);
            try posix.name_to_handle_at(path.root_dir.handle.fd, adjusted_path, stack_ptr, mount_id, std.os.linux.AT.HANDLE_FID);
            const stack_lfh: FileHandle = .{ .handle = stack_ptr };
            return stack_lfh.clone(gpa);
        }

        fn markDirtySteps(w: *Watch, gpa: Allocator, fan_fd: posix.fd_t) !bool {
            const fanotify = std.os.linux.fanotify;
            const M = fanotify.event_metadata;
            var events_buf: [256 + 4096]u8 = undefined;
            var any_dirty = false;
            while (true) {
                var len = posix.read(fan_fd, &events_buf) catch |err| switch (err) {
                    error.WouldBlock => return any_dirty,
                    else => |e| return e,
                };
                var meta: [*]align(1) M = @ptrCast(&events_buf);
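                // The buffer now holds a run of variable-length event records; each
                // begins with an event_metadata header and advances by event_len.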
                while (len >= @sizeOf(M) and meta[0].event_len >= @sizeOf(M) and meta[0].event_len <= len) : ({
                    len -= meta[0].event_len;
                    meta = @ptrCast(@as([*]u8, @ptrCast(meta)) + meta[0].event_len);
                }) {
                    assert(meta[0].vers == M.VERSION);
                    if (meta[0].mask.Q_OVERFLOW) {
                        any_dirty = true;
                        std.log.warn("file system watch queue overflowed; falling back to fstat", .{});
                        markAllFilesDirty(w, gpa);
                        return true;
                    }
                    const fid: *align(1) fanotify.event_info_fid = @ptrCast(meta + 1);
                    switch (fid.hdr.info_type) {
                        .DFID_NAME => {
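                            // A DFID_NAME info record carries the watched directory's file
                            // handle followed by the NUL-terminated name of the changed entry.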
                            const file_handle: *align(1) std.os.linux.file_handle = @ptrCast(&fid.handle);
                            const file_name_z: [*:0]u8 = @ptrCast((&file_handle.f_handle).ptr + file_handle.handle_bytes);
                            const file_name = std.mem.span(file_name_z);
                            const lfh: FileHandle = .{ .handle = file_handle };
                            if (w.os.handle_table.getPtr(lfh)) |value| {
                                if (value.reaction_set.getPtr(".")) |glob_set|
                                    any_dirty = markStepSetDirty(gpa, glob_set, any_dirty);
                                if (value.reaction_set.getPtr(file_name)) |step_set|
                                    any_dirty = markStepSetDirty(gpa, step_set, any_dirty);
                            }
                        },
                        else => |t| std.log.warn("unexpected fanotify event '{s}'", .{@tagName(t)}),
                    }
                }
            }
        }

        fn update(w: *Watch, gpa: Allocator, steps: []const *Step) !void {
            // Add missing marks and note persisted ones.
            for (steps) |step| {
                for (step.inputs.table.keys(), step.inputs.table.values()) |path, *files| {
                    const reaction_set = rs: {
                        const gop = try w.dir_table.getOrPut(gpa, path);
                        if (!gop.found_existing) {
                            var mount_id: MountId = undefined;
                            const dir_handle = Os.getDirHandle(gpa, path, &mount_id) catch |err| switch (err) {
                                error.FileNotFound => {
                                    std.debug.assert(w.dir_table.swapRemove(path));
                                    continue;
                                },
                                else => return err,
                            };
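                            // fanotify marks cannot span filesystems, so one fanotify fd is
                            // created lazily per mount id and all of them are polled in wait().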
                            const fan_fd = blk: {
                                const fd_gop = try w.os.poll_fds.getOrPut(gpa, mount_id);
                                if (!fd_gop.found_existing) {
                                    const fan_fd = std.posix.fanotify_init(.{
                                        .CLASS = .NOTIF,
                                        .CLOEXEC = true,
                                        .NONBLOCK = true,
                                        .REPORT_NAME = true,
                                        .REPORT_DIR_FID = true,
                                        .REPORT_FID = true,
                                        .REPORT_TARGET_FID = true,
                                    }, 0) catch |err| switch (err) {
                                        error.UnsupportedFlags => fatal("fanotify_init failed due to old kernel; requires 5.17+", .{}),
                                        else => |e| return e,
                                    };
                                    fd_gop.value_ptr.* = .{
                                        .fd = fan_fd,
                                        .events = std.posix.POLL.IN,
                                        .revents = undefined,
                                    };
                                }
                                break :blk fd_gop.value_ptr.*.fd;
                            };
                            // `dir_handle` may already be present in the table in
                            // the case that we have multiple Cache.Path instances
                            // that compare unequal but ultimately point to the same
                            // directory on the file system.
                            // In such a case, we must revert adding this directory, but keep
                            // the additions to the step set.
                            const dh_gop = try w.os.handle_table.getOrPut(gpa, dir_handle);
                            if (dh_gop.found_existing) {
                                _ = w.dir_table.pop();
                            } else {
                                assert(dh_gop.index == gop.index);
                                dh_gop.value_ptr.* = .{ .mount_id = mount_id, .reaction_set = .{} };
                                posix.fanotify_mark(fan_fd, .{
                                    .ADD = true,
                                    .ONLYDIR = true,
                                }, fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| {
                                    fatal("unable to watch {f}: {s}", .{ path, @errorName(err) });
                                };
                            }
                            break :rs &dh_gop.value_ptr.reaction_set;
                        }
                        break :rs &w.os.handle_table.values()[gop.index].reaction_set;
                    };
                    for (files.items) |basename| {
                        const gop = try reaction_set.getOrPut(gpa, basename);
                        if (!gop.found_existing) gop.value_ptr.* = .{};
                        try gop.value_ptr.put(gpa, step, w.generation);
                    }
                }
            }

            {
                // Remove marks for files that are no longer inputs.
                var i: usize = 0;
                while (i < w.os.handle_table.entries.len) {
                    {
                        const reaction_set = &w.os.handle_table.values()[i].reaction_set;
                        var step_set_i: usize = 0;
                        while (step_set_i < reaction_set.entries.len) {
                            const step_set = &reaction_set.values()[step_set_i];
                            var dirent_i: usize = 0;
                            while (dirent_i < step_set.entries.len) {
                                const generations = step_set.values();
                                if (generations[dirent_i] == w.generation) {
                                    dirent_i += 1;
                                    continue;
                                }
                                step_set.swapRemoveAt(dirent_i);
                            }
                            if (step_set.entries.len > 0) {
                                step_set_i += 1;
                                continue;
                            }
                            reaction_set.swapRemoveAt(step_set_i);
                        }
                        if (reaction_set.entries.len > 0) {
                            i += 1;
                            continue;
                        }
                    }

                    const path = w.dir_table.keys()[i];

                    const mount_id = w.os.handle_table.values()[i].mount_id;
                    const fan_fd = w.os.poll_fds.getEntry(mount_id).?.value_ptr.fd;
                    posix.fanotify_mark(fan_fd, .{
                        .REMOVE = true,
                        .ONLYDIR = true,
                    }, fan_mask, path.root_dir.handle.fd, path.subPathOrDot()) catch |err| switch (err) {
                        error.FileNotFound => {}, // Expected, harmless.
                        else => |e| std.log.warn("unable to unwatch '{f}': {s}", .{ path, @errorName(e) }),
                    };

                    w.dir_table.swapRemoveAt(i);
                    w.os.handle_table.swapRemoveAt(i);
                }
                w.generation +%= 1;
            }
            w.dir_count = w.dir_table.count();
        }

        fn wait(w: *Watch, gpa: Allocator, timeout: Timeout) !WaitResult {
            const events_len = try std.posix.poll(w.os.poll_fds.values(), timeout.to_i32_ms());
            if (events_len == 0)
                return .timeout;
            for (w.os.poll_fds.values()) |poll_fd| {
                if (poll_fd.revents & std.posix.POLL.IN == std.posix.POLL.IN and try Os.markDirtySteps(w, gpa, poll_fd.fd))
                    return .dirty;
            }
            return .clean;
        }
    },
    .windows => struct {
        const windows = std.os.windows;

        /// Keyed differently but indexes correspond 1:1 with `dir_table`.
        handle_table: HandleTable,
        dir_list: std.AutoArrayHashMapUnmanaged(usize, *Directory),
        io_cp: ?windows.HANDLE,
        counter: usize = 0,

        const HandleTable = std.AutoArrayHashMapUnmanaged(FileId, ReactionSet);

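        /// Identifies a directory on disk independently of the handle or path used
        /// to open it, so duplicate watches of the same directory can be detected.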
        const FileId = struct {
            volumeSerialNumber: windows.ULONG,
            indexNumber: windows.LARGE_INTEGER,
        };

        const Directory = struct {
            handle: windows.HANDLE,
            id: FileId,
            overlapped: windows.OVERLAPPED,
            // 64 KB is the packet size limit when monitoring over a network.
            // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw#remarks
            buffer: [64 * 1024]u8 align(@alignOf(windows.FILE_NOTIFY_INFORMATION)) = undefined,

            /// Start listening for events; the buffer field will eventually be overwritten.
            fn startListening(self: *@This()) !void {
                const r = windows.kernel32.ReadDirectoryChangesW(
                    self.handle,
                    @ptrCast(&self.buffer),
                    self.buffer.len,
                    0,
                    .{
                        .creation = true,
                        .dir_name = true,
                        .file_name = true,
                        .last_write = true,
                        .size = true,
                    },
                    null,
                    &self.overlapped,
                    null,
                );
                if (r == windows.FALSE) {
                    switch (windows.GetLastError()) {
                        .INVALID_FUNCTION => return error.ReadDirectoryChangesUnsupported,
                        else => |err| return windows.unexpectedError(err),
                    }
                }
            }

            fn init(gpa: Allocator, path: Cache.Path) !*@This() {
                // The following is a drawn-out NtCreateFile call (mostly adapted from
                // std.fs.Dir.makeOpenDirAccessMaskW); it's necessary in order to get the
                // specific flags that are required when calling ReadDirectoryChangesW.
                var dir_handle: windows.HANDLE = undefined;
                const root_fd = path.root_dir.handle.fd;
                const sub_path = path.subPathOrDot();
                const sub_path_w = try windows.sliceToPrefixedFileW(root_fd, sub_path);
                const path_len_bytes = std.math.cast(u16, sub_path_w.len * 2) orelse return error.NameTooLong;

                var nt_name = windows.UNICODE_STRING{
                    .Length = @intCast(path_len_bytes),
                    .MaximumLength = @intCast(path_len_bytes),
                    .Buffer = @constCast(sub_path_w.span().ptr),
                };
                var attr = windows.OBJECT_ATTRIBUTES{
                    .Length = @sizeOf(windows.OBJECT_ATTRIBUTES),
                    .RootDirectory = if (std.fs.path.isAbsoluteWindowsW(sub_path_w.span())) null else root_fd,
                    .Attributes = 0, // Note we do not use OBJ_CASE_INSENSITIVE here.
                    .ObjectName = &nt_name,
                    .SecurityDescriptor = null,
                    .SecurityQualityOfService = null,
                };
                var io: windows.IO_STATUS_BLOCK = undefined;

                switch (windows.ntdll.NtCreateFile(
                    &dir_handle,
                    windows.SYNCHRONIZE | windows.GENERIC_READ | windows.FILE_LIST_DIRECTORY,
                    &attr,
                    &io,
                    null,
                    0,
                    windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE | windows.FILE_SHARE_DELETE,
                    windows.FILE_OPEN,
                    windows.FILE_DIRECTORY_FILE | windows.FILE_OPEN_FOR_BACKUP_INTENT,
                    null,
                    0,
                )) {
                    .SUCCESS => {},
                    .OBJECT_NAME_INVALID => return error.BadPathName,
                    .OBJECT_NAME_NOT_FOUND => return error.FileNotFound,
                    .OBJECT_NAME_COLLISION => return error.PathAlreadyExists,
                    .OBJECT_PATH_NOT_FOUND => return error.FileNotFound,
                    .NOT_A_DIRECTORY => return error.NotDir,
                    // This can happen if the directory has 'List folder contents' permission set to 'Deny'.
                    .ACCESS_DENIED => return error.AccessDenied,
                    .INVALID_PARAMETER => unreachable,
                    else => |rc| return windows.unexpectedStatus(rc),
                }
                assert(dir_handle != windows.INVALID_HANDLE_VALUE);
                errdefer windows.CloseHandle(dir_handle);

                const dir_id = try getFileId(dir_handle);

                const dir_ptr = try gpa.create(@This());
                dir_ptr.* = .{
                    .handle = dir_handle,
                    .id = dir_id,
                    .overlapped = std.mem.zeroes(windows.OVERLAPPED),
                };
                return dir_ptr;
            }

            fn deinit(self: *@This(), gpa: Allocator) void {
                _ = windows.kernel32.CancelIo(self.handle);
                windows.CloseHandle(self.handle);
                gpa.destroy(self);
            }
        };

        fn init() !Watch {
            return .{
                .dir_table = .{},
                .dir_count = 0,
                .os = switch (builtin.os.tag) {
                    .windows => .{
                        .handle_table = .{},
                        .dir_list = .{},
                        .io_cp = null,
                    },
                    else => {},
                },
                .generation = 0,
            };
        }

        fn getFileId(handle: windows.HANDLE) !FileId {
            var file_id: FileId = undefined;
            var io_status: windows.IO_STATUS_BLOCK = undefined;
            var volume_info: windows.FILE_FS_VOLUME_INFORMATION = undefined;
            switch (windows.ntdll.NtQueryVolumeInformationFile(
                handle,
                &io_status,
                &volume_info,
                @sizeOf(windows.FILE_FS_VOLUME_INFORMATION),
                .FileFsVolumeInformation,
            )) {
                .SUCCESS => {},
                // A buffer overflow here indicates that more information is available than
                // fits in the provided buffer. It is treated as success because the only
                // data it affects is variable-length information (name, volume name, etc.)
                // that we don't care about.
                .BUFFER_OVERFLOW => {},
                else => |rc| return windows.unexpectedStatus(rc),
            }
            file_id.volumeSerialNumber = volume_info.VolumeSerialNumber;
            var internal_info: windows.FILE_INTERNAL_INFORMATION = undefined;
            switch (windows.ntdll.NtQueryInformationFile(
                handle,
                &io_status,
                &internal_info,
                @sizeOf(windows.FILE_INTERNAL_INFORMATION),
                .FileInternalInformation,
            )) {
                .SUCCESS => {},
                else => |rc| return windows.unexpectedStatus(rc),
            }
            file_id.indexNumber = internal_info.IndexNumber;
            return file_id;
        }

        fn markDirtySteps(w: *Watch, gpa: Allocator, dir: *Directory) !bool {
            var any_dirty = false;
            const bytes_returned = try windows.GetOverlappedResult(dir.handle, &dir.overlapped, false);
            if (bytes_returned == 0) {
                std.log.warn("file system watch queue overflowed; falling back to fstat", .{});
                markAllFilesDirty(w, gpa);
                try dir.startListening();
                return true;
            }
            var file_name_buf: [std.fs.max_path_bytes]u8 = undefined;
            var notify: *align(1) windows.FILE_NOTIFY_INFORMATION = undefined;
            var offset: usize = 0;
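            // Walk the FILE_NOTIFY_INFORMATION records in the buffer: they are
            // variable-length, chained via NextEntryOffset, and store the file name
            // as WTF-16 immediately after the fixed-size header.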
            while (true) {
                notify = @ptrCast(&dir.buffer[offset]);
                const file_name_field: [*]u16 = @ptrFromInt(@intFromPtr(notify) + @sizeOf(windows.FILE_NOTIFY_INFORMATION));
                const file_name_len = std.unicode.wtf16LeToWtf8(&file_name_buf, file_name_field[0 .. notify.FileNameLength / 2]);
                const file_name = file_name_buf[0..file_name_len];
                if (w.os.handle_table.getIndex(dir.id)) |reaction_set_i| {
                    const reaction_set = w.os.handle_table.values()[reaction_set_i];
                    if (reaction_set.getPtr(".")) |glob_set|
                        any_dirty = markStepSetDirty(gpa, glob_set, any_dirty);
                    if (reaction_set.getPtr(file_name)) |step_set| {
                        any_dirty = markStepSetDirty(gpa, step_set, any_dirty);
                    }
                }
                if (notify.NextEntryOffset == 0)
                    break;

                offset += notify.NextEntryOffset;
            }

            // We call this now since at this point we have finished reading dir.buffer.
            try dir.startListening();
            return any_dirty;
        }

        fn update(w: *Watch, gpa: Allocator, steps: []const *Step) !void {
            // Add missing marks and note persisted ones.
            for (steps) |step| {
                for (step.inputs.table.keys(), step.inputs.table.values()) |path, *files| {
                    const reaction_set = rs: {
                        const gop = try w.dir_table.getOrPut(gpa, path);
                        if (!gop.found_existing) {
                            const dir = try Os.Directory.init(gpa, path);
                            errdefer dir.deinit(gpa);
                            // `dir.id` may already be present in the table in
                            // the case that we have multiple Cache.Path instances
                            // that compare unequal but ultimately point to the same
                            // directory on the file system.
                            // In such a case, we must revert adding this directory, but keep
                            // the additions to the step set.
                            const dh_gop = try w.os.handle_table.getOrPut(gpa, dir.id);
                            if (dh_gop.found_existing) {
                                dir.deinit(gpa);
                                _ = w.dir_table.pop();
                            } else {
                                assert(dh_gop.index == gop.index);
                                dh_gop.value_ptr.* = .{};
                                try dir.startListening();
                                const key = w.os.counter;
                                w.os.counter +%= 1;
                                try w.os.dir_list.put(gpa, key, dir);
                                w.os.io_cp = try windows.CreateIoCompletionPort(
                                    dir.handle,
                                    w.os.io_cp,
                                    key,
                                    0,
                                );
                            }
                            break :rs &w.os.handle_table.values()[dh_gop.index];
                        }
                        break :rs &w.os.handle_table.values()[gop.index];
                    };
                    for (files.items) |basename| {
                        const gop = try reaction_set.getOrPut(gpa, basename);
                        if (!gop.found_existing) gop.value_ptr.* = .{};
                        try gop.value_ptr.put(gpa, step, w.generation);
                    }
                }
            }

            {
                // Remove marks for files that are no longer inputs.
                var i: usize = 0;
                while (i < w.os.handle_table.entries.len) {
                    {
                        const reaction_set = &w.os.handle_table.values()[i];
                        var step_set_i: usize = 0;
                        while (step_set_i < reaction_set.entries.len) {
                            const step_set = &reaction_set.values()[step_set_i];
                            var dirent_i: usize = 0;
                            while (dirent_i < step_set.entries.len) {
                                const generations = step_set.values();
                                if (generations[dirent_i] == w.generation) {
                                    dirent_i += 1;
                                    continue;
                                }
                                step_set.swapRemoveAt(dirent_i);
                            }
                            if (step_set.entries.len > 0) {
                                step_set_i += 1;
                                continue;
                            }
                            reaction_set.swapRemoveAt(step_set_i);
                        }
                        if (reaction_set.entries.len > 0) {
                            i += 1;
                            continue;
                        }
                    }

                    w.os.dir_list.values()[i].deinit(gpa);
                    w.os.dir_list.swapRemoveAt(i);
                    w.dir_table.swapRemoveAt(i);
                    w.os.handle_table.swapRemoveAt(i);
                }
                w.generation +%= 1;
            }
            w.dir_count = w.dir_table.count();
        }

        fn wait(w: *Watch, gpa: Allocator, timeout: Timeout) !WaitResult {
            var bytes_transferred: std.os.windows.DWORD = undefined;
            var key: usize = undefined;
            var overlapped_ptr: ?*std.os.windows.OVERLAPPED = undefined;
            return while (true) switch (std.os.windows.GetQueuedCompletionStatus(
                w.os.io_cp.?,
                &bytes_transferred,
                &key,
                &overlapped_ptr,
                @bitCast(timeout.to_i32_ms()),
            )) {
                .Normal => {
                    if (bytes_transferred == 0)
                        break error.Unexpected;

                    // This 'orelse' detects a race condition that happens when we receive a
                    // completion notification for a directory that no longer exists in our list.
                    const dir = w.os.dir_list.get(key) orelse break .clean;

                    break if (try Os.markDirtySteps(w, gpa, dir))
                        .dirty
                    else
                        .clean;
                },
                .Timeout => break .timeout,
                // This status is issued because CancelIo was called; skip and try again.
                .Canceled => continue,
                else => break error.Unexpected,
            };
        }
    },
    .dragonfly, .freebsd, .netbsd, .openbsd, .ios, .tvos, .visionos, .watchos => struct {
        const posix = std.posix;

        kq_fd: i32,
        /// Indexes correspond 1:1 with `dir_table`.
        handles: std.MultiArrayList(struct {
            rs: ReactionSet,
            /// If the corresponding dir_table Path has sub_path == "", then it
            /// suffices as the open directory handle, and this value will be
            /// -1. Otherwise, it needs to be opened in update(), and will be
            /// stored here.
            dir_fd: i32,
        }),

        const dir_open_flags: posix.O = f: {
            var f: posix.O = .{
                .ACCMODE = .RDONLY,
                .NOFOLLOW = false,
                .DIRECTORY = true,
                .CLOEXEC = true,
            };
            if (@hasField(posix.O, "EVTONLY")) f.EVTONLY = true;
            if (@hasField(posix.O, "PATH")) f.PATH = true;
            break :f f;
        };

        const EV = std.c.EV;
        const NOTE = std.c.NOTE;

        fn init() !Watch {
            const kq_fd = try posix.kqueue();
            errdefer posix.close(kq_fd);
            return .{
                .dir_table = .{},
                .dir_count = 0,
                .os = .{
                    .kq_fd = kq_fd,
                    .handles = .empty,
                },
                .generation = 0,
            };
        }

        fn update(w: *Watch, gpa: Allocator, steps: []const *Step) !void {
            const handles = &w.os.handles;
            for (steps) |step| {
                for (step.inputs.table.keys(), step.inputs.table.values()) |path, *files| {
                    const reaction_set = rs: {
                        const gop = try w.dir_table.getOrPut(gpa, path);
                        if (!gop.found_existing) {
                            const skip_open_dir = path.sub_path.len == 0;
                            const dir_fd = if (skip_open_dir)
                                path.root_dir.handle.fd
                            else
                                posix.openat(path.root_dir.handle.fd, path.sub_path, dir_open_flags, 0) catch |err| {
                                    fatal("failed to open directory {f}: {s}", .{ path, @errorName(err) });
                                };
                            // Empirically the dir has to stay open or else no events are triggered.
                            errdefer if (!skip_open_dir) posix.close(dir_fd);
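                            // Register a vnode filter for this directory; udata holds the
                            // dir_table index so wait() can map events back to a reaction set.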
                            const changes = [1]posix.Kevent{.{
                                .ident = @bitCast(@as(isize, dir_fd)),
                                .filter = std.c.EVFILT.VNODE,
                                .flags = EV.ADD | EV.ENABLE | EV.CLEAR,
                                .fflags = NOTE.DELETE | NOTE.WRITE | NOTE.RENAME | NOTE.REVOKE,
                                .data = 0,
                                .udata = gop.index,
                            }};
                            _ = try posix.kevent(w.os.kq_fd, &changes, &.{}, null);
                            assert(handles.len == gop.index);
                            try handles.append(gpa, .{
                                .rs = .{},
                                .dir_fd = if (skip_open_dir) -1 else dir_fd,
                            });
                        }

                        break :rs &handles.items(.rs)[gop.index];
                    };
                    for (files.items) |basename| {
                        const gop = try reaction_set.getOrPut(gpa, basename);
                        if (!gop.found_existing) gop.value_ptr.* = .{};
                        try gop.value_ptr.put(gpa, step, w.generation);
                    }
                }
            }

            {
                // Remove marks for files that are no longer inputs.
                var i: usize = 0;
                while (i < handles.len) {
                    {
                        const reaction_set = &handles.items(.rs)[i];
                        var step_set_i: usize = 0;
                        while (step_set_i < reaction_set.entries.len) {
                            const step_set = &reaction_set.values()[step_set_i];
                            var dirent_i: usize = 0;
                            while (dirent_i < step_set.entries.len) {
                                const generations = step_set.values();
                                if (generations[dirent_i] == w.generation) {
                                    dirent_i += 1;
                                    continue;
                                }
                                step_set.swapRemoveAt(dirent_i);
                            }
                            if (step_set.entries.len > 0) {
                                step_set_i += 1;
                                continue;
                            }
                            reaction_set.swapRemoveAt(step_set_i);
                        }
                        if (reaction_set.entries.len > 0) {
                            i += 1;
                            continue;
                        }
                    }

                    // If the sub_path == "" then this path already has the dir fd
                    // that we need to use as the ident to remove the event. If it
                    // was opened above with openat() then we need to access it via
                    // the dir_fd field.
                    const path = w.dir_table.keys()[i];
                    const dir_fd = if (path.sub_path.len == 0)
                        path.root_dir.handle.fd
                    else
                        handles.items(.dir_fd)[i];
                    assert(dir_fd != -1);

                    // The changelist also needs to update the udata field of the last
                    // event, since we are doing a swap remove, and we store the dir_table
                    // index in the udata field.
                    const last_dir_fd = fd: {
                        const last_path = w.dir_table.keys()[handles.len - 1];
                        const last_dir_fd = if (last_path.sub_path.len == 0)
                            last_path.root_dir.handle.fd
                        else
                            handles.items(.dir_fd)[handles.len - 1];
                        assert(last_dir_fd != -1);
                        break :fd last_dir_fd;
                    };
                    const changes = [_]posix.Kevent{
                        .{
                            .ident = @bitCast(@as(isize, dir_fd)),
                            .filter = std.c.EVFILT.VNODE,
                            .flags = EV.DELETE,
                            .fflags = 0,
                            .data = 0,
                            .udata = i,
                        },
                        .{
                            .ident = @bitCast(@as(isize, last_dir_fd)),
                            .filter = std.c.EVFILT.VNODE,
                            .flags = EV.ADD,
                            .fflags = NOTE.DELETE | NOTE.WRITE | NOTE.RENAME | NOTE.REVOKE,
                            .data = 0,
                            .udata = i,
                        },
                    };
                    const filtered_changes = if (i == handles.len - 1) changes[0..1] else &changes;
                    _ = try posix.kevent(w.os.kq_fd, filtered_changes, &.{}, null);
                    if (path.sub_path.len != 0) posix.close(dir_fd);

                    w.dir_table.swapRemoveAt(i);
                    handles.swapRemove(i);
                }
                w.generation +%= 1;
            }
            w.dir_count = w.dir_table.count();
        }

        fn wait(w: *Watch, gpa: Allocator, timeout: Timeout) !WaitResult {
            var timespec_buffer: posix.timespec = undefined;
            var event_buffer: [100]posix.Kevent = undefined;
            var n = try posix.kevent(w.os.kq_fd, &.{}, &event_buffer, timeout.toTimespec(&timespec_buffer));
            if (n == 0) return .timeout;
            const reaction_sets = w.os.handles.items(.rs);
            var any_dirty = markDirtySteps(gpa, reaction_sets, event_buffer[0..n], false);
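            // A completely full event buffer may mean more events are still queued;
            // keep draining with a zero timeout until a call returns less than a full buffer.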
            timespec_buffer = .{ .sec = 0, .nsec = 0 };
            while (n == event_buffer.len) {
                n = try posix.kevent(w.os.kq_fd, &.{}, &event_buffer, &timespec_buffer);
                if (n == 0) break;
                any_dirty = markDirtySteps(gpa, reaction_sets, event_buffer[0..n], any_dirty);
            }
            return if (any_dirty) .dirty else .clean;
        }

        fn markDirtySteps(
            gpa: Allocator,
            reaction_sets: []ReactionSet,
            events: []const std.c.Kevent,
            start_any_dirty: bool,
        ) bool {
            var any_dirty = start_any_dirty;
            for (events) |event| {
                const index: usize = @intCast(event.udata);
                const reaction_set = &reaction_sets[index];
                // If we knew the basename of the changed file, here we would
                // mark only the step set dirty, and possibly the glob set:
                //if (reaction_set.getPtr(".")) |glob_set|
                //    any_dirty = markStepSetDirty(gpa, glob_set, any_dirty);
                //if (reaction_set.getPtr(file_name)) |step_set|
                //    any_dirty = markStepSetDirty(gpa, step_set, any_dirty);
                // However, we don't know the file name, so just mark all the
                // step sets dirty for this directory.
                for (reaction_set.values()) |*step_set| {
                    any_dirty = markStepSetDirty(gpa, step_set, any_dirty);
                }
            }
            return any_dirty;
        }
    },
    .macos => struct {
        fse: FsEvents,

        fn init() !Watch {
            return .{
                .os = .{ .fse = try .init() },
                .dir_count = 0,
                .dir_table = undefined,
                .generation = undefined,
            };
        }
        fn update(w: *Watch, gpa: Allocator, steps: []const *Step) !void {
            try w.os.fse.setPaths(gpa, steps);
            w.dir_count = w.os.fse.watch_roots.len;
        }
        fn wait(w: *Watch, gpa: Allocator, timeout: Timeout) !WaitResult {
            return w.os.fse.wait(gpa, switch (timeout) {
                .none => null,
                .ms => |ms| @as(u64, ms) * std.time.ns_per_ms,
            });
        }
    },
    else => void,
};

pub fn init() !Watch {
    return Os.init();
}

pub const Match = struct {
    /// Relative to the watched directory, the file path that triggers this
    /// match.
    basename: []const u8,
    /// The step to re-run when the file corresponding to `basename` is changed.
    step: *Step,

    pub const Context = struct {
        pub fn hash(self: Context, a: Match) u32 {
            _ = self;
            var hasher = Hash.init(0);
            std.hash.autoHash(&hasher, a.step);
            hasher.update(a.basename);
            return @truncate(hasher.final());
        }
        pub fn eql(self: Context, a: Match, b: Match, b_index: usize) bool {
            _ = self;
            _ = b_index;
            return a.step == b.step and std.mem.eql(u8, a.basename, b.basename);
        }
    };
};

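/// Conservative fallback used when the OS event queue overflows: resets every
/// step registered in any watched directory so that no change is missed.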
fn markAllFilesDirty(w: *Watch, gpa: Allocator) void {
    for (w.os.handle_table.values()) |value| {
        const reaction_set = switch (builtin.os.tag) {
            .linux => value.reaction_set,
            else => value,
        };
        for (reaction_set.values()) |step_set| {
            for (step_set.keys()) |step| {
                step.recursiveReset(gpa);
            }
        }
    }
}

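/// Returns true if `any_dirty` was already set, or if any step in `step_set` was
/// not in the `precheck_done` state and therefore got reset.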
fn markStepSetDirty(gpa: Allocator, step_set: *StepSet, any_dirty: bool) bool {
    var this_any_dirty = false;
    for (step_set.keys()) |step| {
        if (step.state != .precheck_done) {
            step.recursiveReset(gpa);
            this_any_dirty = true;
        }
    }
    return any_dirty or this_any_dirty;
}

pub fn update(w: *Watch, gpa: Allocator, steps: []const *Step) !void {
    return Os.update(w, gpa, steps);
}

pub const Timeout = union(enum) {
    none,
    ms: u16,

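    /// A negative value means "wait indefinitely": poll() treats -1 as no timeout,
    /// and bit-cast to u32 it becomes INFINITE for GetQueuedCompletionStatus.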
    pub fn to_i32_ms(t: Timeout) i32 {
        return switch (t) {
            .none => -1,
            .ms => |ms| ms,
        };
    }

    pub fn toTimespec(t: Timeout, buf: *std.posix.timespec) ?*std.posix.timespec {
        return switch (t) {
            .none => null,
            .ms => |ms_u16| {
                const ms: isize = ms_u16;
                buf.* = .{
                    .sec = @divTrunc(ms, std.time.ms_per_s),
                    .nsec = @rem(ms, std.time.ms_per_s) * std.time.ns_per_ms,
                };
                return buf;
            },
        };
    }
};

pub const WaitResult = enum {
    timeout,
    /// File system watching triggered on files that were marked as inputs to at least one Step.
    /// Relevant steps have been marked dirty.
    dirty,
    /// File system watching triggered but none of the events were relevant to
    /// what we are listening to. There is nothing to do.
    clean,
};

pub fn wait(w: *Watch, gpa: Allocator, timeout: Timeout) !WaitResult {
    return Os.wait(w, gpa, timeout);
}