1//! This API is non-allocating, non-fallible, thread-safe, and lock-free.
2
3const std = @import("std");
4const builtin = @import("builtin");
5const windows = std.os.windows;
6const testing = std.testing;
7const assert = std.debug.assert;
8const Progress = @This();
9const posix = std.posix;
10const is_big_endian = builtin.cpu.arch.endian() == .big;
11const is_windows = builtin.os.tag == .windows;
12const Writer = std.Io.Writer;
13
/// The terminal to which progress is written (set to stderr by `start`).
/// Only meaningful when `terminal_mode` is not `.off`; otherwise this field
/// is left `undefined` and must not be accessed.
terminal: std.fs.File,

/// How (and whether) progress is rendered. `.off` disables all printing.
terminal_mode: TerminalMode,

/// The rendering (or IPC-forwarding) thread, if one was spawned by `start`.
update_thread: ?std.Thread,

/// Atomically set by SIGWINCH as well as the root done() function.
redraw_event: std.Thread.ResetEvent,
/// Indicates a request to shut down and reset global state.
/// Accessed atomically.
done: bool,
/// Whether progress output is currently on the terminal and must be erased
/// before other stderr output is written. Protected by `stderr_mutex`.
need_clear: bool,
/// Overall application status; stored/loaded atomically (see `setStatus`).
status: Status,

/// How many nanoseconds between terminal redraws. Set from `Options`.
refresh_rate_ns: u64,
/// How long the output stays hidden after `start`. Set from `Options`.
initial_delay_ns: u64,

/// Last known terminal size; 0 means the size has not been learned yet.
rows: u16,
cols: u16,

/// Accessed only by the update thread.
draw_buffer: []u8,

/// This is in a separate array from `node_storage` but with the same length so
/// that it can be iterated over efficiently without trashing too much of the
/// CPU cache.
node_parents: []Node.Parent,
node_storage: []Node.Storage,
/// Per-node "next" links for the freelist headed by `node_freelist`.
node_freelist_next: []Node.OptionalIndex,
node_freelist: Freelist,
/// This is the number of elements in node arrays which have been used so far. Nodes before this
/// index are either active, or on the freelist. The remaining nodes are implicitly free. This
/// value may at times temporarily exceed the node count.
node_end_index: u32,
50
/// Coarse application state, settable via `setStatus`. Stored atomically in
/// the global instance so the update thread can read it while rendering.
pub const Status = enum {
    /// Indicates the application is progressing towards completion of a task.
    /// Unless the application is interactive, this is the only status the
    /// program will ever have!
    working,
    /// The application has completed an operation, and is now waiting for user
    /// input rather than calling exit(0).
    success,
    /// The application encountered an error, and is now waiting for user input
    /// rather than calling exit(1).
    failure,
    /// The application encountered at least one error, but is still working on
    /// more tasks.
    failure_working,
};
66
/// Head of the free node list plus an ABA-protection generation, packed into
/// a single u32 so the pair can be swapped with one `@cmpxchgWeak`.
const Freelist = packed struct(u32) {
    head: Node.OptionalIndex,
    /// Whenever `node_freelist` is added to, this generation is incremented
    /// to avoid ABA bugs when acquiring nodes. Wrapping arithmetic is used.
    generation: u24,
};
73
/// Selects how progress is rendered; `.off` means nothing is printed.
pub const TerminalMode = union(enum) {
    off,
    ansi_escape_codes,
    /// This is not the same as being run on windows because other terminals
    /// exist like MSYS/git-bash.
    windows_api: if (is_windows) WindowsApi else void,

    pub const WindowsApi = struct {
        /// The output code page of the console.
        code_page: windows.UINT,
    };
};
86
/// Configuration for `start`.
pub const Options = struct {
    /// User-provided buffer with static lifetime.
    ///
    /// Used to store the entire write buffer sent to the terminal. Progress output will be truncated if it
    /// cannot fit into this buffer which will look bad but not cause any malfunctions.
    ///
    /// Must be at least 200 bytes.
    draw_buffer: []u8 = &default_draw_buffer,
    /// How many nanoseconds between writing updates to the terminal.
    refresh_rate_ns: u64 = 80 * std.time.ns_per_ms,
    /// How many nanoseconds to keep the output hidden
    initial_delay_ns: u64 = 200 * std.time.ns_per_ms,
    /// If provided, causes the progress item to have a denominator.
    /// 0 means unknown.
    estimated_total_items: usize = 0,
    root_name: []const u8 = "",
    /// Suppresses terminal output. Note that IPC reporting to a parent
    /// process still occurs when the ZIG_PROGRESS environment variable is set.
    disable_printing: bool = false,
};
105
/// Represents one unit of progress. Each node can have children nodes, or
/// one can use integers with `update`.
pub const Node = struct {
    index: OptionalIndex,

    pub const none: Node = .{ .index = .none };

    pub const max_name_len = 40;

    // Extern layout: this struct is copied/byte-swapped for IPC (see
    // `byteSwap`), so its field layout must be well-defined.
    const Storage = extern struct {
        /// Little endian.
        completed_count: u32,
        /// 0 means unknown.
        /// Little endian.
        estimated_total_count: u32,
        name: [max_name_len]u8 align(@alignOf(usize)),

        /// Not thread-safe.
        /// Returns the stored IPC file descriptor, or null if this node is a
        /// normal progress node. The special state is flagged by
        /// `estimated_total_count == maxInt(u32)` (see `setIpcFd`).
        fn getIpcFd(s: Storage) ?posix.fd_t {
            return if (s.estimated_total_count == std.math.maxInt(u32)) switch (@typeInfo(posix.fd_t)) {
                .int => @bitCast(s.completed_count),
                .pointer => @ptrFromInt(s.completed_count),
                else => @compileError("unsupported fd_t of " ++ @typeName(posix.fd_t)),
            } else null;
        }

        /// Thread-safe.
        fn setIpcFd(s: *Storage, fd: posix.fd_t) void {
            const integer: u32 = switch (@typeInfo(posix.fd_t)) {
                .int => @bitCast(fd),
                .pointer => @intFromPtr(fd),
                else => @compileError("unsupported fd_t of " ++ @typeName(posix.fd_t)),
            };
            // `estimated_total_count` max int indicates the special state that
            // causes `completed_count` to be treated as a file descriptor, so
            // the order here matters.
            @atomicStore(u32, &s.completed_count, integer, .monotonic);
            @atomicStore(u32, &s.estimated_total_count, std.math.maxInt(u32), .release); // synchronizes with acquire in `serialize`
        }

        /// Not thread-safe.
        fn byteSwap(s: *Storage) void {
            s.completed_count = @byteSwap(s.completed_count);
            s.estimated_total_count = @byteSwap(s.estimated_total_count);
        }

        comptime {
            assert((@sizeOf(Storage) % 4) == 0);
        }
    };

    const Parent = enum(u8) {
        /// Unallocated storage.
        unused = std.math.maxInt(u8) - 1,
        /// Indicates root node.
        none = std.math.maxInt(u8),
        /// Index into `node_storage`.
        _,

        fn unwrap(i: @This()) ?Index {
            return switch (i) {
                .unused, .none => return null,
                else => @enumFromInt(@intFromEnum(i)),
            };
        }
    };

    pub const OptionalIndex = enum(u8) {
        none = std.math.maxInt(u8),
        /// Index into `node_storage`.
        _,

        pub fn unwrap(i: @This()) ?Index {
            if (i == .none) return null;
            return @enumFromInt(@intFromEnum(i));
        }

        fn toParent(i: @This()) Parent {
            assert(@intFromEnum(i) != @intFromEnum(Parent.unused));
            return @enumFromInt(@intFromEnum(i));
        }
    };

    /// Index into `node_storage`.
    pub const Index = enum(u8) {
        _,

        fn toParent(i: @This()) Parent {
            assert(@intFromEnum(i) != @intFromEnum(Parent.unused));
            assert(@intFromEnum(i) != @intFromEnum(Parent.none));
            return @enumFromInt(@intFromEnum(i));
        }

        pub fn toOptional(i: @This()) OptionalIndex {
            return @enumFromInt(@intFromEnum(i));
        }
    };

    /// Create a new child progress node. Thread-safe.
    ///
    /// Passing 0 for `estimated_total_items` means unknown.
    pub fn start(node: Node, name: []const u8, estimated_total_items: usize) Node {
        if (noop_impl) {
            assert(node.index == .none);
            return Node.none;
        }
        // A `.none` parent silently produces a `.none` child, so untracked
        // subtrees (e.g. after node exhaustion) stay untracked.
        const node_index = node.index.unwrap() orelse return Node.none;
        const parent = node_index.toParent();

        // First try to pop a previously freed slot off the lock-free freelist.
        const freelist = &global_progress.node_freelist;
        var old_freelist = @atomicLoad(Freelist, freelist, .acquire); // acquire to ensure we have the correct "next" entry
        while (old_freelist.head.unwrap()) |free_index| {
            const next_ptr = freelistNextByIndex(free_index);
            const new_freelist: Freelist = .{
                .head = @atomicLoad(Node.OptionalIndex, next_ptr, .monotonic),
                // We don't need to increment the generation when removing nodes from the free list,
                // only when adding them. (This choice is arbitrary; the opposite would also work.)
                .generation = old_freelist.generation,
            };
            old_freelist = @cmpxchgWeak(
                Freelist,
                freelist,
                old_freelist,
                new_freelist,
                .acquire, // not theoretically necessary, but not allowed to be weaker than the failure order
                .acquire, // ensure we have the correct `node_freelist_next` entry on the next iteration
            ) orelse {
                // We won the allocation race.
                return init(free_index, parent, name, estimated_total_items);
            };
        }

        // Freelist empty: bump-allocate a fresh slot from the end of the arrays.
        const free_index = @atomicRmw(u32, &global_progress.node_end_index, .Add, 1, .monotonic);
        if (free_index >= global_progress.node_storage.len) {
            // Ran out of node storage memory. Progress for this node will not be tracked.
            _ = @atomicRmw(u32, &global_progress.node_end_index, .Sub, 1, .monotonic);
            return Node.none;
        }

        return init(@enumFromInt(free_index), parent, name, estimated_total_items);
    }

    /// This is the same as calling `start` and then `end` on the returned `Node`. Thread-safe.
    pub fn completeOne(n: Node) void {
        const index = n.index.unwrap() orelse return;
        const storage = storageByIndex(index);
        _ = @atomicRmw(u32, &storage.completed_count, .Add, 1, .monotonic);
    }

    /// Thread-safe. Bytes after the first 0 (null) byte in `new_name` are ignored.
    pub fn setName(n: Node, new_name: []const u8) void {
        const index = n.index.unwrap() orelse return;
        const storage = storageByIndex(index);

        const name_len = @min(max_name_len, std.mem.indexOfScalar(u8, new_name, 0) orelse new_name.len);

        copyAtomicStore(storage.name[0..name_len], new_name[0..name_len]);
        // Null-terminate when the name does not fill the whole array.
        if (name_len < storage.name.len)
            @atomicStore(u8, &storage.name[name_len], 0, .monotonic);
    }

    /// Gets the name of this `Node`.
    /// A pointer to this array can later be passed to `setName` to restore the name.
    pub fn getName(n: Node) [max_name_len]u8 {
        var dest: [max_name_len]u8 align(@alignOf(usize)) = undefined;
        if (n.index.unwrap()) |index| {
            copyAtomicLoad(&dest, &storageByIndex(index).name);
        }
        return dest;
    }

    /// Thread-safe.
    pub fn setCompletedItems(n: Node, completed_items: usize) void {
        const index = n.index.unwrap() orelse return;
        const storage = storageByIndex(index);
        @atomicStore(u32, &storage.completed_count, std.math.lossyCast(u32, completed_items), .monotonic);
    }

    /// Thread-safe. 0 means unknown.
    pub fn setEstimatedTotalItems(n: Node, count: usize) void {
        const index = n.index.unwrap() orelse return;
        const storage = storageByIndex(index);
        // Avoid u32 max int which is used to indicate a special state.
        const saturated = @min(std.math.maxInt(u32) - 1, count);
        @atomicStore(u32, &storage.estimated_total_count, saturated, .monotonic);
    }

    /// Thread-safe.
    pub fn increaseEstimatedTotalItems(n: Node, count: usize) void {
        const index = n.index.unwrap() orelse return;
        const storage = storageByIndex(index);
        _ = @atomicRmw(u32, &storage.estimated_total_count, .Add, std.math.lossyCast(u32, count), .monotonic);
    }

    /// Finish a started `Node`. Thread-safe.
    ///
    /// For a child node: bumps the parent's completed count and pushes this
    /// node's slot onto the freelist. For the root node: signals the update
    /// thread to shut down and joins it.
    pub fn end(n: Node) void {
        if (noop_impl) {
            assert(n.index == .none);
            return;
        }
        const index = n.index.unwrap() orelse return;
        const parent_ptr = parentByIndex(index);
        if (@atomicLoad(Node.Parent, parent_ptr, .monotonic).unwrap()) |parent_index| {
            _ = @atomicRmw(u32, &storageByIndex(parent_index).completed_count, .Add, 1, .monotonic);
            @atomicStore(Node.Parent, parent_ptr, .unused, .monotonic);

            const freelist = &global_progress.node_freelist;
            var old_freelist = @atomicLoad(Freelist, freelist, .monotonic);
            while (true) {
                @atomicStore(Node.OptionalIndex, freelistNextByIndex(index), old_freelist.head, .monotonic);
                old_freelist = @cmpxchgWeak(
                    Freelist,
                    freelist,
                    old_freelist,
                    .{ .head = index.toOptional(), .generation = old_freelist.generation +% 1 },
                    .release, // ensure a matching `start` sees the freelist link written above
                    .monotonic, // our write above is irrelevant if we need to retry
                ) orelse {
                    // We won the race.
                    return;
                };
            }
        } else {
            // No parent: this is the root node, so tear down the update thread.
            @atomicStore(bool, &global_progress.done, true, .monotonic);
            global_progress.redraw_event.set();
            if (global_progress.update_thread) |thread| thread.join();
        }
    }

    /// Posix-only. Used by `std.process.Child`. Thread-safe.
    pub fn setIpcFd(node: Node, fd: posix.fd_t) void {
        const index = node.index.unwrap() orelse return;
        assert(fd >= 0);
        assert(fd != posix.STDOUT_FILENO);
        assert(fd != posix.STDIN_FILENO);
        assert(fd != posix.STDERR_FILENO);
        storageByIndex(index).setIpcFd(fd);
    }

    /// Posix-only. Thread-safe. Assumes the node is storing an IPC file
    /// descriptor.
    pub fn getIpcFd(node: Node) ?posix.fd_t {
        const index = node.index.unwrap() orelse return null;
        const storage = storageByIndex(index);
        const int = @atomicLoad(u32, &storage.completed_count, .monotonic);
        return switch (@typeInfo(posix.fd_t)) {
            .int => @bitCast(int),
            .pointer => @ptrFromInt(int),
            else => @compileError("unsupported fd_t of " ++ @typeName(posix.fd_t)),
        };
    }

    fn storageByIndex(index: Node.Index) *Node.Storage {
        return &global_progress.node_storage[@intFromEnum(index)];
    }

    fn parentByIndex(index: Node.Index) *Node.Parent {
        return &global_progress.node_parents[@intFromEnum(index)];
    }

    fn freelistNextByIndex(index: Node.Index) *Node.OptionalIndex {
        return &global_progress.node_freelist_next[@intFromEnum(index)];
    }

    /// Initializes the storage slot `free_index` and links it to `parent`.
    /// The parent pointer is published last so a partially initialized node is
    /// never observed as live by `serialize`.
    fn init(free_index: Index, parent: Parent, name: []const u8, estimated_total_items: usize) Node {
        assert(parent == .none or @intFromEnum(parent) < node_storage_buffer_len);

        const storage = storageByIndex(free_index);
        @atomicStore(u32, &storage.completed_count, 0, .monotonic);
        @atomicStore(u32, &storage.estimated_total_count, std.math.lossyCast(u32, estimated_total_items), .monotonic);
        const name_len = @min(max_name_len, name.len);
        copyAtomicStore(storage.name[0..name_len], name[0..name_len]);
        if (name_len < storage.name.len)
            @atomicStore(u8, &storage.name[name_len], 0, .monotonic);

        const parent_ptr = parentByIndex(free_index);
        if (std.debug.runtime_safety) {
            assert(@atomicLoad(Node.Parent, parent_ptr, .monotonic) == .unused);
        }
        @atomicStore(Node.Parent, parent_ptr, parent, .monotonic);

        return .{ .index = free_index.toOptional() };
    }
};
390
/// The singleton instance backing this whole API. All `Node` handles index
/// into its arrays. `start` asserts it is only ever initialized once.
var global_progress: Progress = .{
    .terminal = undefined,
    .terminal_mode = .off,
    .update_thread = null,
    .redraw_event = .unset,
    .refresh_rate_ns = undefined,
    .initial_delay_ns = undefined,
    .rows = 0,
    .cols = 0,
    .draw_buffer = undefined,
    .done = false,
    .need_clear = false,
    .status = .working,

    .node_parents = &node_parents_buffer,
    .node_storage = &node_storage_buffer,
    .node_freelist_next = &node_freelist_next_buffer,
    .node_freelist = .{ .head = .none, .generation = 0 },
    .node_end_index = 0,
};
411
/// Capacity of the global node arrays, i.e. the maximum number of nodes that
/// can be tracked simultaneously; `Node.start` calls beyond this silently
/// return `Node.none`. Must stay below `Node.Parent.unused` (254) since node
/// indices share the u8 space with the two reserved tags.
const node_storage_buffer_len = 83;
var node_parents_buffer: [node_storage_buffer_len]Node.Parent = undefined;
var node_storage_buffer: [node_storage_buffer_len]Node.Storage = undefined;
var node_freelist_next_buffer: [node_storage_buffer_len]Node.OptionalIndex = undefined;

/// Backing store for `Options.draw_buffer` when the caller does not supply one.
var default_draw_buffer: [4096]u8 = undefined;

/// Records where `start` was first called so that an illegal second call can
/// dump that trace before hitting `unreachable`.
var debug_start_trace = std.debug.Trace.init;
420
/// Whether this target supports forwarding progress to a parent process over
/// a file descriptor (the ZIG_PROGRESS mechanism).
pub const have_ipc = switch (builtin.os.tag) {
    .wasi, .freestanding, .windows => false,
    else => true,
};
425
/// When true the entire API compiles down to no-ops: single-threaded builds
/// and targets without an update thread cannot report progress.
const noop_impl = builtin.single_threaded or switch (builtin.os.tag) {
    .wasi, .freestanding => true,
    else => false,
} or switch (builtin.zig_backend) {
    else => false,
};
432
/// Initializes a global Progress instance.
///
/// Asserts there is only one global Progress instance.
///
/// Call `Node.end` when done.
pub fn start(options: Options) Node {
    // Ensure there is only 1 global Progress object.
    if (global_progress.node_end_index != 0) {
        debug_start_trace.dump();
        unreachable;
    }
    debug_start_trace.add("first initialized here");

    // Claim slot 0 for the root node before any other thread can observe it.
    @memset(global_progress.node_parents, .unused);
    const root_node = Node.init(@enumFromInt(0), .none, options.root_name, options.estimated_total_items);
    global_progress.done = false;
    global_progress.node_end_index = 1;

    assert(options.draw_buffer.len >= 200);
    global_progress.draw_buffer = options.draw_buffer;
    global_progress.refresh_rate_ns = options.refresh_rate_ns;
    global_progress.initial_delay_ns = options.initial_delay_ns;

    if (noop_impl)
        return Node.none;

    // If ZIG_PROGRESS is set, a parent process expects serialized progress on
    // that file descriptor instead of terminal output.
    if (std.process.parseEnvVarInt("ZIG_PROGRESS", u31, 10)) |ipc_fd| {
        global_progress.update_thread = std.Thread.spawn(.{}, ipcThreadRun, .{
            @as(posix.fd_t, switch (@typeInfo(posix.fd_t)) {
                .int => ipc_fd,
                .pointer => @ptrFromInt(ipc_fd),
                else => @compileError("unsupported fd_t of " ++ @typeName(posix.fd_t)),
            }),
        }) catch |err| {
            std.log.warn("failed to spawn IPC thread for communicating progress to parent: {s}", .{@errorName(err)});
            return Node.none;
        };
    } else |env_err| switch (env_err) {
        error.EnvironmentVariableNotFound => {
            if (options.disable_printing) {
                return Node.none;
            }
            const stderr: std.fs.File = .stderr();
            global_progress.terminal = stderr;
            if (stderr.getOrEnableAnsiEscapeSupport()) {
                global_progress.terminal_mode = .ansi_escape_codes;
            } else if (is_windows and stderr.isTty()) {
                // Windows console without ANSI support: fall back to the
                // console API, remembering the code page for tree symbols.
                global_progress.terminal_mode = TerminalMode{ .windows_api = .{
                    .code_page = windows.kernel32.GetConsoleOutputCP(),
                } };
            }

            if (global_progress.terminal_mode == .off) {
                return Node.none;
            }

            // Redraw on terminal resize; SA_RESTART avoids spurious EINTR in
            // unrelated syscalls.
            if (have_sigwinch) {
                const act: posix.Sigaction = .{
                    .handler = .{ .sigaction = handleSigWinch },
                    .mask = posix.sigemptyset(),
                    .flags = (posix.SA.SIGINFO | posix.SA.RESTART),
                };
                posix.sigaction(.WINCH, &act, null);
            }

            if (switch (global_progress.terminal_mode) {
                .off => unreachable, // handled a few lines above
                .ansi_escape_codes => std.Thread.spawn(.{}, updateThreadRun, .{}),
                .windows_api => if (is_windows) std.Thread.spawn(.{}, windowsApiUpdateThreadRun, .{}) else unreachable,
            }) |thread| {
                global_progress.update_thread = thread;
            } else |err| {
                std.log.warn("unable to spawn thread for printing progress to terminal: {s}", .{@errorName(err)});
                return Node.none;
            }
        },
        else => |e| {
            std.log.warn("invalid ZIG_PROGRESS file descriptor integer: {s}", .{@errorName(e)});
            return Node.none;
        },
    }

    return root_node;
}
517
/// Records the overall application status (e.g. now waiting on user input)
/// on the global instance. Thread-safe; a no-op when progress reporting is
/// compiled out.
pub fn setStatus(new_status: Status) void {
    if (!noop_impl) {
        @atomicStore(Status, &global_progress.status, new_status, .monotonic);
    }
}
522
/// Blocks until `redraw_event` is set or `timeout_ns` elapses, then clears
/// the event. Returns whether a resize is needed to learn the terminal size:
/// true when the event fired (e.g. SIGWINCH) or the size was never queried.
fn wait(timeout_ns: u64) bool {
    const event_fired = blk: {
        global_progress.redraw_event.timedWait(timeout_ns) catch |err| switch (err) {
            error.Timeout => break :blk false,
        };
        break :blk true;
    };
    global_progress.redraw_event.reset();
    if (event_fired) return true;
    return global_progress.cols == 0;
}
531
/// Body of the ANSI-escape-code rendering thread: redraw at `refresh_rate_ns`
/// until the root node signals `done`, then erase the output and exit.
fn updateThreadRun() void {
    // Store this data in the thread so that it does not need to be part of the
    // linker data of the main executable.
    var serialized_buffer: Serialized.Buffer = undefined;

    // First frame, after the initial delay.
    {
        const resize_flag = wait(global_progress.initial_delay_ns);
        if (@atomicLoad(bool, &global_progress.done, .monotonic)) return;
        maybeUpdateSize(resize_flag);

        const buffer, _ = computeRedraw(&serialized_buffer);
        // tryLock: if another thread currently owns stderr, skip this frame
        // rather than block rendering behind it.
        if (stderr_mutex.tryLock()) {
            defer stderr_mutex.unlock();
            write(buffer) catch return;
            global_progress.need_clear = true;
        }
    }

    while (true) {
        const resize_flag = wait(global_progress.refresh_rate_ns);

        if (@atomicLoad(bool, &global_progress.done, .monotonic)) {
            // Final cleanup must not race with other stderr writers, so block.
            stderr_mutex.lock();
            defer stderr_mutex.unlock();
            return clearWrittenWithEscapeCodes() catch {};
        }

        maybeUpdateSize(resize_flag);

        const buffer, _ = computeRedraw(&serialized_buffer);
        if (stderr_mutex.tryLock()) {
            defer stderr_mutex.unlock();
            write(buffer) catch return;
            global_progress.need_clear = true;
        }
    }
}
569
/// Writes `windows_api_start_marker` to the console so that
/// `windowsApiMoveToMarker` can later locate the start of the progress output.
fn windowsApiWriteMarker() void {
    // Write the marker that we will use to find the beginning of the progress when clearing.
    // Note: This doesn't have to use WriteConsoleW, but doing so avoids dealing with the code page.
    var num_chars_written: windows.DWORD = undefined;
    const handle = global_progress.terminal.handle;
    _ = windows.kernel32.WriteConsoleW(handle, &[_]u16{windows_api_start_marker}, 1, &num_chars_written, null);
}
577
/// Counterpart of `updateThreadRun` for consoles without ANSI escape support:
/// uses a marker character plus Windows console APIs to clear and reposition.
fn windowsApiUpdateThreadRun() void {
    // Thread-local scratch so it is not part of the executable's linker data.
    var serialized_buffer: Serialized.Buffer = undefined;

    // First frame, after the initial delay.
    {
        const resize_flag = wait(global_progress.initial_delay_ns);
        if (@atomicLoad(bool, &global_progress.done, .monotonic)) return;
        maybeUpdateSize(resize_flag);

        const buffer, const nl_n = computeRedraw(&serialized_buffer);
        // tryLock: skip the frame if another thread currently owns stderr.
        if (stderr_mutex.tryLock()) {
            defer stderr_mutex.unlock();
            windowsApiWriteMarker();
            write(buffer) catch return;
            global_progress.need_clear = true;
            windowsApiMoveToMarker(nl_n) catch return;
        }
    }

    while (true) {
        const resize_flag = wait(global_progress.refresh_rate_ns);

        if (@atomicLoad(bool, &global_progress.done, .monotonic)) {
            stderr_mutex.lock();
            defer stderr_mutex.unlock();
            return clearWrittenWindowsApi() catch {};
        }

        maybeUpdateSize(resize_flag);

        const buffer, const nl_n = computeRedraw(&serialized_buffer);
        if (stderr_mutex.tryLock()) {
            defer stderr_mutex.unlock();
            // Clear the previous frame before drawing the next one.
            clearWrittenWindowsApi() catch return;
            windowsApiWriteMarker();
            write(buffer) catch return;
            global_progress.need_clear = true;
            windowsApiMoveToMarker(nl_n) catch return;
        }
    }
}
618
/// Allows the caller to freely write to stderr until `unlockStdErr` is called.
///
/// During the lock, any `std.Progress` information is cleared from the terminal.
///
/// The lock is recursive; the same thread may hold the lock multiple times.
pub fn lockStdErr() void {
    stderr_mutex.lock();
    // Best-effort: a failure here only leaves stale progress output behind.
    clearWrittenWithEscapeCodes() catch {};
}
628
/// Releases the stderr lock taken by `lockStdErr`.
pub fn unlockStdErr() void {
    stderr_mutex.unlock();
}
632
/// Protected by `stderr_mutex`.
const stderr_writer: *Writer = &stderr_file_writer.interface;
/// Protected by `stderr_mutex`.
/// Starts with an empty buffer; `lockStderrWriter` installs a caller-provided
/// one. On Windows the file handle is filled in lazily by `lockStderrWriter`.
var stderr_file_writer: std.fs.File.Writer = .{
    .interface = std.fs.File.Writer.initInterface(&.{}),
    .file = if (is_windows) undefined else .stderr(),
    .mode = .streaming,
};
641
/// Allows the caller to freely write to the returned `Writer`,
/// initialized with `buffer`, until `unlockStderrWriter` is called.
///
/// During the lock, any `std.Progress` information is cleared from the terminal.
///
/// The lock is recursive; the same thread may hold the lock multiple times.
pub fn lockStderrWriter(buffer: []u8) *Writer {
    stderr_mutex.lock();
    clearWrittenWithEscapeCodes() catch {};
    // NOTE(review): on Windows the stderr handle is fetched here rather than
    // in the global initializer — presumably it cannot be resolved at comptime
    // there; confirm before relying on this elsewhere.
    if (is_windows) stderr_file_writer.file = .stderr();
    // Flush any leftover data before swapping in the caller's buffer.
    stderr_writer.flush() catch {};
    stderr_writer.buffer = buffer;
    return stderr_writer;
}
656
/// Flushes and detaches the buffer installed by `lockStderrWriter`, then
/// releases the lock. The detach happens before unlock so no later write can
/// land in memory the caller may be about to reuse.
pub fn unlockStderrWriter() void {
    stderr_writer.flush() catch {};
    stderr_writer.end = 0;
    stderr_writer.buffer = &.{};
    stderr_mutex.unlock();
}
663
/// Body of the thread that forwards progress state to a parent process over
/// the ZIG_PROGRESS file descriptor instead of drawing to a terminal. Exits
/// when the root node finishes or the parent closes the pipe.
fn ipcThreadRun(fd: posix.fd_t) anyerror!void {
    // Keep this scratch space on the thread's stack so that it does not need
    // to be part of the linker data of the main executable.
    var serialized_buffer: Serialized.Buffer = undefined;

    // The first iteration waits for the initial delay; every later iteration
    // waits for the refresh rate.
    var timeout_ns = global_progress.initial_delay_ns;
    while (true) {
        _ = wait(timeout_ns);
        timeout_ns = global_progress.refresh_rate_ns;

        if (@atomicLoad(bool, &global_progress.done, .monotonic))
            return;

        const serialized = serialize(&serialized_buffer);
        writeIpc(fd, serialized) catch |err| switch (err) {
            error.BrokenPipe => return,
        };
    }
}
693
// "Synchronized output" mode (DEC private mode 2026): terminals buffer all
// output between these two sequences and display it atomically.
const start_sync = "\x1b[?2026h";
const up_one_line = "\x1bM";
const clear = "\x1b[J";
const save = "\x1b7";
const restore = "\x1b8";
const finish_sync = "\x1b[?2026l";

// OSC 9;4 sequences (ConEmu extension, also honored by Windows Terminal and
// others): report task progress to the terminal's taskbar indicator.
// States: 0 = remove, 1 = normal (with percent), 2 = error, 3 = indeterminate.
const progress_remove = "\x1b]9;4;0\x1b\\";
const @"progress_normal {d}" = "\x1b]9;4;1;{d}\x1b\\";
const @"progress_error {d}" = "\x1b]9;4;2;{d}\x1b\\";
const progress_pulsing = "\x1b]9;4;3\x1b\\";
const progress_pulsing_error = "\x1b]9;4;2\x1b\\";
const progress_normal_100 = "\x1b]9;4;1;100\x1b\\";
const progress_error_100 = "\x1b]9;4;2;100\x1b\\";
708
/// The box-drawing characters used to render the node tree, with one byte
/// representation per supported terminal encoding.
const TreeSymbol = enum {
    /// ├─
    tee,
    /// │
    line,
    /// └─
    langle,

    const Encoding = enum {
        ansi_escapes,
        code_page_437,
        utf8,
        ascii,
    };

    /// The escape sequence representation as a string literal
    /// (ESC ( 0 switches to the terminal's line-drawing charset,
    /// ESC ( B switches back to ASCII).
    fn escapeSeq(symbol: TreeSymbol) *const [9:0]u8 {
        return switch (symbol) {
            .tee => "\x1B\x28\x30\x74\x71\x1B\x28\x42 ",
            .line => "\x1B\x28\x30\x78\x1B\x28\x42 ",
            .langle => "\x1B\x28\x30\x6d\x71\x1B\x28\x42 ",
        };
    }

    /// Returns the rendering of `symbol` in the requested `encoding`.
    fn bytes(symbol: TreeSymbol, encoding: Encoding) []const u8 {
        return switch (encoding) {
            .ansi_escapes => escapeSeq(symbol),
            .code_page_437 => switch (symbol) {
                .tee => "\xC3\xC4 ",
                .line => "\xB3 ",
                .langle => "\xC0\xC4 ",
            },
            .utf8 => switch (symbol) {
                .tee => "├─ ",
                .line => "│ ",
                .langle => "└─ ",
            },
            .ascii => switch (symbol) {
                .tee => "|- ",
                .line => "| ",
                .langle => "+- ",
            },
        };
    }

    /// Longest byte length of `symbol` across all encodings; usable for
    /// worst-case buffer sizing. Evaluable at comptime.
    fn maxByteLen(symbol: TreeSymbol) usize {
        var max: usize = 0;
        inline for (@typeInfo(Encoding).@"enum".fields) |field| {
            const len = symbol.bytes(@field(Encoding, field.name)).len;
            max = @max(max, len);
        }
        return max;
    }
};
763
/// Appends the rendering of `symbol` to `buf` starting at `start_i`,
/// choosing the encoding from the active terminal mode. Returns the index
/// one past the written bytes. Asserts that printing is enabled.
fn appendTreeSymbol(symbol: TreeSymbol, buf: []u8, start_i: usize) usize {
    switch (global_progress.terminal_mode) {
        .off => unreachable,
        .ansi_escape_codes => {
            const bytes = symbol.escapeSeq();
            buf[start_i..][0..bytes.len].* = bytes.*;
            return start_i + bytes.len;
        },
        .windows_api => |windows_api| {
            const bytes = if (!is_windows) unreachable else switch (windows_api.code_page) {
                // Code page 437 is the default code page and contains the box drawing symbols
                437 => symbol.bytes(.code_page_437),
                // UTF-8
                65001 => symbol.bytes(.utf8),
                // Fall back to ASCII approximation
                else => symbol.bytes(.ascii),
            };
            @memcpy(buf[start_i..][0..bytes.len], bytes);
            return start_i + bytes.len;
        },
    }
}
786
/// Erases any progress output currently on the terminal and removes the
/// taskbar progress indicator, using ANSI escape sequences. Does nothing
/// when progress is compiled out or nothing has been drawn since the last
/// clear.
fn clearWrittenWithEscapeCodes() anyerror!void {
    if (noop_impl) return;
    if (!global_progress.need_clear) return;

    global_progress.need_clear = false;
    try write(clear ++ progress_remove);
}
793
/// U+25BA or ► (BLACK RIGHT-POINTING POINTER).
const windows_api_start_marker = 0x25BA;
796
/// Blanks the previously drawn progress output via Windows console APIs,
/// filling from the current cursor position downward with spaces.
fn clearWrittenWindowsApi() error{Unexpected}!void {
    // This uses a 'marker' strategy. The idea is:
    // - Always write a marker (in this case U+25BA or ►) at the beginning of the progress
    // - Get the current cursor position (at the end of the progress)
    // - Subtract the number of lines written to get the expected start of the progress
    // - Check to see if the first character at the start of the progress is the marker
    // - If it's not the marker, keep checking the line before until we find it
    // - Clear the screen from that position down, and set the cursor position to the start
    //
    // This strategy works even if there is line wrapping, and can handle the window
    // being resized/scrolled arbitrarily.
    //
    // Notes:
    // - Ideally, the marker would be a zero-width character, but the Windows console
    //   doesn't seem to support rendering zero-width characters (they show up as a space)
    // - This same marker idea could technically be done with an attribute instead
    //   (https://learn.microsoft.com/en-us/windows/console/console-screen-buffers#character-attributes)
    //   but it must be a valid attribute and it actually needs to apply to the first
    //   character in order to be readable via ReadConsoleOutputAttribute. It doesn't seem
    //   like any of the available attributes are invisible/benign.
    if (!global_progress.need_clear) return;
    const handle = global_progress.terminal.handle;
    const screen_area = @as(windows.DWORD, global_progress.cols) * global_progress.rows;

    var console_info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined;
    if (windows.kernel32.GetConsoleScreenBufferInfo(handle, &console_info) == 0) {
        return error.Unexpected;
    }
    var num_chars_written: windows.DWORD = undefined;
    if (windows.kernel32.FillConsoleOutputCharacterW(handle, ' ', screen_area, console_info.dwCursorPosition, &num_chars_written) == 0) {
        return error.Unexpected;
    }
}
830
/// Moves the console cursor back to the marker written by
/// `windowsApiWriteMarker`. `nl_n` is the number of newlines just written;
/// the cursor is expected `nl_n` rows up, but the search scans further up to
/// tolerate line wrapping (see the strategy notes in `clearWrittenWindowsApi`).
fn windowsApiMoveToMarker(nl_n: usize) error{Unexpected}!void {
    const handle = global_progress.terminal.handle;
    var console_info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined;
    if (windows.kernel32.GetConsoleScreenBufferInfo(handle, &console_info) == 0) {
        return error.Unexpected;
    }
    const cursor_pos = console_info.dwCursorPosition;
    const expected_y = cursor_pos.Y - @as(i16, @intCast(nl_n));
    var start_pos: windows.COORD = .{ .X = 0, .Y = expected_y };
    while (start_pos.Y >= 0) {
        var wchar: [1]u16 = undefined;
        var num_console_chars_read: windows.DWORD = undefined;
        if (windows.kernel32.ReadConsoleOutputCharacterW(handle, &wchar, wchar.len, start_pos, &num_console_chars_read) == 0) {
            return error.Unexpected;
        }

        if (wchar[0] == windows_api_start_marker) break;
        start_pos.Y -= 1;
    } else {
        // If we couldn't find the marker, then just assume that no lines wrapped
        start_pos = .{ .X = 0, .Y = expected_y };
    }
    if (windows.kernel32.SetConsoleCursorPosition(handle, start_pos) == 0) {
        return error.Unexpected;
    }
}
857
/// Tree linkage for a node: its first child and next sibling.
/// NOTE(review): populated while laying out the serialized tree for drawing
/// (the consumer is outside this chunk) — confirm against `computeRedraw`.
const Children = struct {
    child: Node.OptionalIndex,
    sibling: Node.OptionalIndex,
};
862
/// A compacted, non-atomic snapshot of the active node tree, produced by
/// `serialize`. The slices point into a `Serialized.Buffer`.
const Serialized = struct {
    parents: []Node.Parent,
    storage: []Node.Storage,

    /// Worst-case scratch space for `serialize`. Callers keep this on the
    /// update thread's stack so it does not bloat the executable's data.
    const Buffer = struct {
        parents: [node_storage_buffer_len]Node.Parent,
        storage: [node_storage_buffer_len]Node.Storage,
        /// Maps original node indices to their position in the compacted copy.
        map: [node_storage_buffer_len]Node.OptionalIndex,

        parents_copy: [node_storage_buffer_len]Node.Parent,
        storage_copy: [node_storage_buffer_len]Node.Storage,
        ipc_metadata_fds_copy: [node_storage_buffer_len]Fd,
        ipc_metadata_copy: [node_storage_buffer_len]SavedMetadata,

        ipc_metadata_fds: [node_storage_buffer_len]Fd,
        ipc_metadata: [node_storage_buffer_len]SavedMetadata,
    };
};
881
/// Takes a best-effort snapshot of all live nodes into `serialized_buffer`,
/// compacting out unused slots and remapping parent indices to positions in
/// the compacted copy. Returns slices into the buffer.
fn serialize(serialized_buffer: *Serialized.Buffer) Serialized {
    var serialized_len: usize = 0;
    var any_ipc = false;

    // Iterate all of the nodes and construct a serializable copy of the state that can be examined
    // without atomics. The `@min` call is here because `node_end_index` might briefly exceed the
    // node count sometimes.
    const end_index = @min(@atomicLoad(u32, &global_progress.node_end_index, .monotonic), global_progress.node_storage.len);
    for (
        global_progress.node_parents[0..end_index],
        global_progress.node_storage[0..end_index],
        serialized_buffer.map[0..end_index],
    ) |*parent_ptr, *storage_ptr, *map| {
        const parent = @atomicLoad(Node.Parent, parent_ptr, .monotonic);
        if (parent == .unused) {
            // We might read "mixed" node data in this loop, due to weird atomic things
            // or just a node actually being freed while this loop runs. That could cause
            // there to be a parent reference to a nonexistent node. Without this assignment,
            // this would lead to the map entry containing stale data. By assigning none, the
            // child node with the bad parent pointer will be harmlessly omitted from the tree.
            //
            // Note that there's no concern of potentially creating "looping" data if we read
            // "mixed" node data like this, because if a node is (directly or indirectly) its own
            // parent, it will just not be printed at all. The general idea here is that performance
            // is more important than 100% correct output every frame, given that this API is likely
            // to be used in hot paths!
            map.* = .none;
            continue;
        }
        const dest_storage = &serialized_buffer.storage[serialized_len];
        copyAtomicLoad(&dest_storage.name, &storage_ptr.name);
        dest_storage.estimated_total_count = @atomicLoad(u32, &storage_ptr.estimated_total_count, .acquire); // synchronizes with release in `setIpcFd`
        dest_storage.completed_count = @atomicLoad(u32, &storage_ptr.completed_count, .monotonic);

        any_ipc = any_ipc or (dest_storage.getIpcFd() != null);
        serialized_buffer.parents[serialized_len] = parent;
        map.* = @enumFromInt(serialized_len);
        serialized_len += 1;
    }

    // Remap parents to point inside serialized arrays.
    for (serialized_buffer.parents[0..serialized_len]) |*parent| {
        parent.* = switch (parent.*) {
            .unused => unreachable,
            .none => .none,
            _ => |p| serialized_buffer.map[@intFromEnum(p)].toParent(),
        };
    }

    // Find nodes which correspond to child processes.
    if (any_ipc)
        serialized_len = serializeIpc(serialized_len, serialized_buffer);

    return .{
        .parents = serialized_buffer.parents[0..serialized_len],
        .storage = serialized_buffer.storage[0..serialized_len],
    };
}
940
/// Per-child-process bookkeeping remembered between updates, so an empty pipe
/// can be rendered from the previous frame's data (see `useSavedIpcData`).
const SavedMetadata = struct {
    /// Bytes of a truncated message still to be read and discarded from the
    /// pipe before the next complete message begins.
    remaining_read_trash_bytes: u16,
    /// Serialized index of the node the child's root was mounted onto.
    main_index: u8,
    /// Start of the child's subtree within the serialized arrays.
    start_index: u8,
    /// Number of serialized subtree nodes (excluding the mounted root).
    nodes_len: u8,
};
947
/// A file descriptor stored compactly in 32 bits. On Windows the "fd" is a
/// pointer-sized `HANDLE`; it is round-tripped through a sign-extending
/// integer cast in both directions (presumably so small negative sentinel
/// handle values survive the narrowing — note the `isize` intermediates).
const Fd = enum(i32) {
    _,

    fn init(fd: posix.fd_t) Fd {
        if (is_windows) {
            // HANDLE -> isize keeps the sign; the enum then narrows to i32.
            const signed: isize = @bitCast(@intFromPtr(fd));
            return @enumFromInt(signed);
        } else {
            return @enumFromInt(fd);
        }
    }

    fn get(fd: Fd) posix.fd_t {
        if (is_windows) {
            // Sign-extend back to pointer width before rebuilding the HANDLE.
            const signed: isize = @intFromEnum(fd);
            return @ptrFromInt(@as(usize, @bitCast(signed)));
        } else {
            return @intFromEnum(fd);
        }
    }
};
962
/// Number of in-use entries in the `ipc_metadata*` arrays of
/// `Serialized.Buffer`; written by `serializeIpc` and `useSavedIpcData`.
var ipc_metadata_len: u8 = 0;
964
/// Reads progress data sent over pipes by child processes and splices each
/// child's subtree into the serialized arrays, returning the new serialized
/// length. Child data is untrusted and may arrive truncated; invalid or
/// incomplete messages fall back to the copy saved from the previous update
/// (see `useSavedIpcData`). Updates the `ipc_metadata_len` global.
fn serializeIpc(start_serialized_len: usize, serialized_buffer: *Serialized.Buffer) usize {
    const ipc_metadata_fds_copy = &serialized_buffer.ipc_metadata_fds_copy;
    const ipc_metadata_copy = &serialized_buffer.ipc_metadata_copy;
    const ipc_metadata_fds = &serialized_buffer.ipc_metadata_fds;
    const ipc_metadata = &serialized_buffer.ipc_metadata;

    var serialized_len = start_serialized_len;
    var pipe_buf: [2 * 4096]u8 = undefined;

    const old_ipc_metadata_fds = ipc_metadata_fds_copy[0..ipc_metadata_len];
    const old_ipc_metadata = ipc_metadata_copy[0..ipc_metadata_len];
    ipc_metadata_len = 0;

    main_loop: for (
        serialized_buffer.parents[0..serialized_len],
        serialized_buffer.storage[0..serialized_len],
        0..,
    ) |main_parent, *main_storage, main_index| {
        if (main_parent == .unused) continue;
        const fd = main_storage.getIpcFd() orelse continue;
        const opt_saved_metadata = findOld(fd, old_ipc_metadata_fds, old_ipc_metadata);
        // Drain the nonblocking pipe, first discarding any trash bytes left
        // over from a message that was truncated on a previous update.
        var bytes_read: usize = 0;
        while (true) {
            const n = posix.read(fd, pipe_buf[bytes_read..]) catch |err| switch (err) {
                error.WouldBlock => break,
                else => |e| {
                    std.log.debug("failed to read child progress data: {s}", .{@errorName(e)});
                    main_storage.completed_count = 0;
                    main_storage.estimated_total_count = 0;
                    continue :main_loop;
                },
            };
            if (n == 0) break;
            if (opt_saved_metadata) |m| {
                if (m.remaining_read_trash_bytes > 0) {
                    assert(bytes_read == 0);
                    if (m.remaining_read_trash_bytes >= n) {
                        m.remaining_read_trash_bytes = @intCast(m.remaining_read_trash_bytes - n);
                        continue;
                    }
                    const src = pipe_buf[m.remaining_read_trash_bytes..n];
                    @memmove(pipe_buf[0..src.len], src);
                    m.remaining_read_trash_bytes = 0;
                    bytes_read = src.len;
                    continue;
                }
            }
            bytes_read += n;
        }
        // Ignore all but the last message on the pipe.
        var input: []u8 = pipe_buf[0..bytes_read];
        if (input.len == 0) {
            serialized_len = useSavedIpcData(serialized_len, serialized_buffer, main_storage, main_index, opt_saved_metadata, 0, fd);
            continue;
        }

        const storage, const parents = while (true) {
            const subtree_len: usize = input[0];
            if (subtree_len == 0) {
                // Don't trust child data: a zero-length subtree contains no
                // root node to mount, and would make the `parents.len - 1`
                // and `storage[0]` accesses below go out of bounds. Skip the
                // byte; if it was the only data, fall back to saved data.
                if (input.len > 1) {
                    input = input[1..];
                    continue;
                }
                serialized_len = useSavedIpcData(serialized_len, serialized_buffer, main_storage, main_index, opt_saved_metadata, 0, fd);
                continue :main_loop;
            }
            const expected_bytes = 1 + subtree_len * (@sizeOf(Node.Storage) + @sizeOf(Node.Parent));
            if (input.len < expected_bytes) {
                // Ignore short reads. We'll handle the next full message when it comes instead.
                const remaining_read_trash_bytes: u16 = @intCast(expected_bytes - input.len);
                serialized_len = useSavedIpcData(serialized_len, serialized_buffer, main_storage, main_index, opt_saved_metadata, remaining_read_trash_bytes, fd);
                continue :main_loop;
            }
            if (input.len > expected_bytes) {
                input = input[expected_bytes..];
                continue;
            }
            const storage_bytes = input[1..][0 .. subtree_len * @sizeOf(Node.Storage)];
            const parents_bytes = input[1 + storage_bytes.len ..][0 .. subtree_len * @sizeOf(Node.Parent)];
            break .{
                std.mem.bytesAsSlice(Node.Storage, storage_bytes),
                std.mem.bytesAsSlice(Node.Parent, parents_bytes),
            };
        };

        // Excluding the root (mounted onto `main_storage` below), clamp the
        // subtree to whatever space remains in the serialized arrays.
        const nodes_len: u8 = @intCast(@min(parents.len - 1, serialized_buffer.storage.len - serialized_len));

        // Remember in case the pipe is empty on next update.
        ipc_metadata_fds[ipc_metadata_len] = Fd.init(fd);
        ipc_metadata[ipc_metadata_len] = .{
            .remaining_read_trash_bytes = 0,
            .start_index = @intCast(serialized_len),
            .nodes_len = nodes_len,
            .main_index = @intCast(main_index),
        };
        ipc_metadata_len += 1;

        // Mount the root here.
        copyRoot(main_storage, &storage[0]);
        if (is_big_endian) main_storage.byteSwap();

        // Copy the rest of the tree to the end.
        const storage_dest = serialized_buffer.storage[serialized_len..][0..nodes_len];
        @memcpy(storage_dest, storage[1..][0..nodes_len]);

        // Always little-endian over the pipe.
        if (is_big_endian) for (storage_dest) |*s| s.byteSwap();

        // Patch up parent pointers taking into account how the subtree is mounted.
        for (serialized_buffer.parents[serialized_len..][0..nodes_len], parents[1..][0..nodes_len]) |*dest, p| {
            dest.* = switch (p) {
                // Fix bad data so the rest of the code does not see `unused`.
                .none, .unused => .none,
                // Root node is being mounted here.
                @as(Node.Parent, @enumFromInt(0)) => @enumFromInt(main_index),
                // Other nodes mounted at the end.
                // Don't trust child data; if the data is outside the expected range, ignore the data.
                // This also handles the case when data was truncated.
                _ => |off| if (@intFromEnum(off) > nodes_len)
                    .none
                else
                    @enumFromInt(serialized_len + @intFromEnum(off) - 1),
            };
        }

        serialized_len += nodes_len;
    }

    // Save a copy in case any pipes are empty on the next update.
    @memcpy(serialized_buffer.parents_copy[0..serialized_len], serialized_buffer.parents[0..serialized_len]);
    @memcpy(serialized_buffer.storage_copy[0..serialized_len], serialized_buffer.storage[0..serialized_len]);
    @memcpy(ipc_metadata_fds_copy[0..ipc_metadata_len], ipc_metadata_fds[0..ipc_metadata_len]);
    @memcpy(ipc_metadata_copy[0..ipc_metadata_len], ipc_metadata[0..ipc_metadata_len]);

    return serialized_len;
}
1093
/// Copies a child's root node counters onto the mounted node. An empty child
/// root name (leading 0 byte) means "keep the name already present here".
fn copyRoot(dest: *Node.Storage, src: *align(1) Node.Storage) void {
    // Copy the chosen name to a local first so the whole-struct store below
    // never reads `dest.name` while `dest.*` is being overwritten.
    const name: [Node.max_name_len]u8 = if (src.name[0] == 0) dest.name else src.name;
    dest.* = .{
        .completed_count = src.completed_count,
        .estimated_total_count = src.estimated_total_count,
        .name = name,
    };
}
1101
/// Linear search for the saved metadata entry whose fd matches `ipc_fd`.
/// Returns null when this child process was not seen on the previous update.
fn findOld(
    ipc_fd: posix.fd_t,
    old_metadata_fds: []Fd,
    old_metadata: []SavedMetadata,
) ?*SavedMetadata {
    var index: usize = 0;
    while (index < old_metadata_fds.len) : (index += 1) {
        if (old_metadata_fds[index].get() == ipc_fd)
            return &old_metadata[index];
    }
    return null;
}
1113
/// Fallback for when a child's pipe had no (complete) new message this
/// update: re-serializes the child's subtree from the previous update's
/// saved copy, and records fresh metadata (including any trash bytes still
/// to be discarded). Returns the new serialized length.
/// Updates the `ipc_metadata_len` global.
fn useSavedIpcData(
    start_serialized_len: usize,
    serialized_buffer: *Serialized.Buffer,
    main_storage: *Node.Storage,
    main_index: usize,
    opt_saved_metadata: ?*SavedMetadata,
    remaining_read_trash_bytes: u16,
    fd: posix.fd_t,
) usize {
    const parents_copy = &serialized_buffer.parents_copy;
    const storage_copy = &serialized_buffer.storage_copy;
    const ipc_metadata_fds = &serialized_buffer.ipc_metadata_fds;
    const ipc_metadata = &serialized_buffer.ipc_metadata;

    // No data from the previous update either: zero the counters, and only
    // record metadata if there are trash bytes that must still be skipped.
    const saved_metadata = opt_saved_metadata orelse {
        main_storage.completed_count = 0;
        main_storage.estimated_total_count = 0;
        if (remaining_read_trash_bytes > 0) {
            ipc_metadata_fds[ipc_metadata_len] = Fd.init(fd);
            ipc_metadata[ipc_metadata_len] = .{
                .remaining_read_trash_bytes = remaining_read_trash_bytes,
                .start_index = @intCast(start_serialized_len),
                .nodes_len = 0,
                .main_index = @intCast(main_index),
            };
            ipc_metadata_len += 1;
        }
        return start_serialized_len;
    };

    const start_index = saved_metadata.start_index;
    // Clamp to remaining buffer space; the subtree may land at a different
    // (later) offset than it occupied last update.
    const nodes_len = @min(saved_metadata.nodes_len, serialized_buffer.storage.len - start_serialized_len);
    const old_main_index = saved_metadata.main_index;

    ipc_metadata_fds[ipc_metadata_len] = Fd.init(fd);
    ipc_metadata[ipc_metadata_len] = .{
        .remaining_read_trash_bytes = remaining_read_trash_bytes,
        .start_index = @intCast(start_serialized_len),
        .nodes_len = nodes_len,
        .main_index = @intCast(main_index),
    };
    ipc_metadata_len += 1;

    const parents = parents_copy[start_index..][0..nodes_len];
    const storage = storage_copy[start_index..][0..nodes_len];

    copyRoot(main_storage, &storage_copy[old_main_index]);

    @memcpy(serialized_buffer.storage[start_serialized_len..][0..storage.len], storage);

    // Translate the saved (old-frame) parent indices into the new frame:
    // the old mount point becomes `main_index`, out-of-range data is dropped,
    // and in-range indices are rebased from `start_index` to the new offset.
    for (serialized_buffer.parents[start_serialized_len..][0..parents.len], parents) |*dest, p| {
        dest.* = switch (p) {
            .none, .unused => .none,
            _ => |prev| d: {
                if (@intFromEnum(prev) == old_main_index) {
                    break :d @enumFromInt(main_index);
                } else if (@intFromEnum(prev) > nodes_len) {
                    break :d .none;
                } else {
                    break :d @enumFromInt(@intFromEnum(prev) - start_index + start_serialized_len);
                }
            },
        };
    }

    return start_serialized_len + storage.len;
}
1181
/// Takes a snapshot of the node tree (`serialize`), reconstructs child and
/// sibling links for traversal, and renders the whole progress display into
/// `global_progress.draw_buffer`. Returns the rendered bytes along with the
/// number of newlines they contain (needed later to move the cursor back up).
fn computeRedraw(serialized_buffer: *Serialized.Buffer) struct { []u8, usize } {
    const serialized = serialize(serialized_buffer);

    // Now we can analyze our copy of the graph without atomics, reconstructing
    // children lists which do not exist in the canonical data. These are
    // needed for tree traversal below.

    var children_buffer: [node_storage_buffer_len]Children = undefined;
    const children = children_buffer[0..serialized.parents.len];

    @memset(children, .{ .child = .none, .sibling = .none });

    for (serialized.parents, 0..) |parent, child_index_usize| {
        const child_index: Node.Index = @enumFromInt(child_index_usize);
        assert(parent != .unused);
        const parent_index = parent.unwrap() orelse continue;
        const children_node = &children[@intFromEnum(parent_index)];
        if (children_node.child.unwrap()) |existing_child_index| {
            // A later child is spliced in right after the first child rather
            // than prepended, keeping the first child's list position stable.
            const existing_child = &children[@intFromEnum(existing_child_index)];
            children[@intFromEnum(child_index)].sibling = existing_child.sibling;
            existing_child.sibling = child_index.toOptional();
        } else {
            children_node.child = child_index.toOptional();
        }
    }

    // The strategy is, with every redraw:
    // erase to end of screen, write, move cursor to beginning of line, move cursor up N lines
    // This keeps the cursor at the beginning so that unlocked stderr writes
    // don't get eaten by the clear.

    var i: usize = 0;
    const buf = global_progress.draw_buffer;

    if (global_progress.terminal_mode == .ansi_escape_codes) {
        buf[i..][0..start_sync.len].* = start_sync.*;
        i += start_sync.len;
    }

    switch (global_progress.terminal_mode) {
        .off => unreachable,
        .ansi_escape_codes => {
            buf[i..][0..clear.len].* = clear.*;
            i += clear.len;
        },
        .windows_api => if (!is_windows) unreachable,
    }

    const root_node_index: Node.Index = @enumFromInt(0);
    i, const nl_n = computeNode(buf, i, 0, serialized, children, root_node_index);

    if (global_progress.terminal_mode == .ansi_escape_codes) {
        {
            // Set progress state https://conemu.github.io/en/AnsiEscapeCodes.html#ConEmu_specific_OSC
            // When the root node has no name, report the progress of its first
            // child instead, since that is what the user sees at the top.
            const root_storage = &serialized.storage[0];
            const storage = if (root_storage.name[0] != 0 or children[0].child == .none) root_storage else &serialized.storage[@intFromEnum(children[0].child)];
            const estimated_total = storage.estimated_total_count;
            const completed_items = storage.completed_count;
            const status = @atomicLoad(Status, &global_progress.status, .monotonic);
            switch (status) {
                .working => {
                    if (estimated_total == 0) {
                        buf[i..][0..progress_pulsing.len].* = progress_pulsing.*;
                        i += progress_pulsing.len;
                    } else {
                        const percent = completed_items * 100 / estimated_total;
                        if (std.fmt.bufPrint(buf[i..], @"progress_normal {d}", .{percent})) |b| {
                            i += b.len;
                        } else |_| {}
                    }
                },
                .success => {
                    buf[i..][0..progress_remove.len].* = progress_remove.*;
                    i += progress_remove.len;
                },
                .failure => {
                    buf[i..][0..progress_error_100.len].* = progress_error_100.*;
                    i += progress_error_100.len;
                },
                .failure_working => {
                    if (estimated_total == 0) {
                        buf[i..][0..progress_pulsing_error.len].* = progress_pulsing_error.*;
                        i += progress_pulsing_error.len;
                    } else {
                        const percent = completed_items * 100 / estimated_total;
                        if (std.fmt.bufPrint(buf[i..], @"progress_error {d}", .{percent})) |b| {
                            i += b.len;
                        } else |_| {}
                    }
                },
            }
        }

        // Return the cursor to the start of the first progress line so
        // unlocked stderr writes land in a sane place.
        if (nl_n > 0) {
            buf[i] = '\r';
            i += 1;
            for (0..nl_n) |_| {
                buf[i..][0..up_one_line.len].* = up_one_line.*;
                i += up_one_line.len;
            }
        }

        buf[i..][0..finish_sync.len].* = finish_sync.*;
        i += finish_sync.len;
    }

    return .{ buf[0..i], nl_n };
}
1290
/// Recursively appends the tree-drawing prefix (spaces or vertical lines for
/// each ancestor level) for `node_index` to `buf`, returning the new write
/// offset. Returns `buf.len` when the line's worst case would not fit, which
/// the caller (`computeNode`) detects via its own bounds check.
fn computePrefix(
    buf: []u8,
    start_i: usize,
    nl_n: usize,
    serialized: Serialized,
    children: []const Children,
    node_index: Node.Index,
) usize {
    var i = start_i;
    const parent_index = serialized.parents[@intFromEnum(node_index)].unwrap() orelse return i;
    // Direct children of the root get no prefix; likewise when the root is
    // nameless (it is not drawn, so its children are the visual top level).
    if (serialized.parents[@intFromEnum(parent_index)] == .none) return i;
    if (@intFromEnum(serialized.parents[@intFromEnum(parent_index)]) == 0 and
        serialized.storage[0].name[0] == 0)
    {
        return i;
    }
    // Emit ancestors' prefixes first (outermost level leftmost).
    i = computePrefix(buf, i, nl_n, serialized, children, parent_index);
    if (children[@intFromEnum(parent_index)].sibling == .none) {
        // The ancestor is a last child: no vertical line continues below it.
        const prefix = "   ";
        const upper_bound_len = prefix.len + lineUpperBoundLen(nl_n);
        if (i + upper_bound_len > buf.len) return buf.len;
        buf[i..][0..prefix.len].* = prefix.*;
        i += prefix.len;
    } else {
        const upper_bound_len = TreeSymbol.line.maxByteLen() + lineUpperBoundLen(nl_n);
        if (i + upper_bound_len > buf.len) return buf.len;
        i = appendTreeSymbol(.line, buf, i);
    }
    return i;
}
1321
/// Worst-case byte length of one rendered node line: the widest tree symbol,
/// a maximal "[completed/total] " counter, the longest possible node name,
/// the newline, plus the trailing cursor-return and sync escape sequences
/// (which grow with the number of lines already emitted, `nl_n`).
fn lineUpperBoundLen(nl_n: usize) usize {
    // \r\n on Windows, \n otherwise.
    const newline_len: usize = if (is_windows) 2 else 1;
    const symbol_len = @max(TreeSymbol.tee.maxByteLen(), TreeSymbol.langle.maxByteLen());
    const counter_len = "[4294967296/4294967296] ".len;
    // '\r' plus one `up_one_line` per line drawn so far (and this one).
    const cursor_return_len = 1 + (nl_n + 1) * up_one_line.len;
    return symbol_len + counter_len + Node.max_name_len + newline_len +
        cursor_return_len + finish_sync.len;
}
1330
/// Renders the line for `node_index` into `buf` at `start_i` (prefix, tree
/// symbol, "[completed/total]" counter, name, newline), then recurses into
/// its first child and next sibling while within the terminal's row limit.
/// Returns the new buffer offset and newline count; returns the inputs
/// unchanged when the line could not fit in `buf`.
fn computeNode(
    buf: []u8,
    start_i: usize,
    start_nl_n: usize,
    serialized: Serialized,
    children: []const Children,
    node_index: Node.Index,
) struct { usize, usize } {
    var i = start_i;
    var nl_n = start_nl_n;

    i = computePrefix(buf, i, nl_n, serialized, children, node_index);

    // `computePrefix` returns `buf.len` on overflow, so this also catches it.
    if (i + lineUpperBoundLen(nl_n) > buf.len)
        return .{ start_i, start_nl_n };

    const storage = &serialized.storage[@intFromEnum(node_index)];
    const estimated_total = storage.estimated_total_count;
    const completed_items = storage.completed_count;
    // Name is NUL-terminated within the fixed-size array, or full-length.
    const name = if (std.mem.indexOfScalar(u8, &storage.name, 0)) |end| storage.name[0..end] else &storage.name;
    const parent = serialized.parents[@intFromEnum(node_index)];

    if (parent != .none) p: {
        // Direct children of a nameless root get no branch symbol: the root
        // line is not drawn, so they appear as the top level.
        if (@intFromEnum(parent) == 0 and serialized.storage[0].name[0] == 0) {
            break :p;
        }
        if (children[@intFromEnum(node_index)].sibling == .none) {
            i = appendTreeSymbol(.langle, buf, i);
        } else {
            i = appendTreeSymbol(.tee, buf, i);
        }
    }

    const is_empty_root = @intFromEnum(node_index) == 0 and serialized.storage[0].name[0] == 0;
    if (!is_empty_root) {
        if (name.len != 0 or estimated_total > 0) {
            if (estimated_total > 0) {
                if (std.fmt.bufPrint(buf[i..], "[{d}/{d}] ", .{ completed_items, estimated_total })) |b| {
                    i += b.len;
                } else |_| {}
            } else if (completed_items != 0) {
                if (std.fmt.bufPrint(buf[i..], "[{d}] ", .{completed_items})) |b| {
                    i += b.len;
                } else |_| {}
            }
            if (name.len != 0) {
                if (std.fmt.bufPrint(buf[i..], "{s}", .{name})) |b| {
                    i += b.len;
                } else |_| {}
            }
        }

        // Truncate the line to the terminal width.
        i = @min(global_progress.cols + start_i, i);
        if (is_windows) {
            // \r\n on Windows is necessary for the old console with the
            // ENABLE_VIRTUAL_TERMINAL_PROCESSING | DISABLE_NEWLINE_AUTO_RETURN
            // console modes set to behave properly.
            buf[i] = '\r';
            i += 1;
        }
        buf[i] = '\n';
        i += 1;
        nl_n += 1;
    }

    if (global_progress.withinRowLimit(nl_n)) {
        if (children[@intFromEnum(node_index)].child.unwrap()) |child| {
            i, nl_n = computeNode(buf, i, nl_n, serialized, children, child);
        }
    }

    if (global_progress.withinRowLimit(nl_n)) {
        if (children[@intFromEnum(node_index)].sibling.unwrap()) |sibling| {
            i, nl_n = computeNode(buf, i, nl_n, serialized, children, sibling);
        }
    }

    return .{ i, nl_n };
}
1410
/// Returns true while `nl_n` rendered lines still fit on the terminal.
fn withinRowLimit(p: *Progress, nl_n: usize) bool {
    // Two rows are held in reserve: one because the cursor rests on the line
    // after the output, and one so the shell prompt (PS1) is not scrolled off
    // the top of the terminal.
    const reserved_rows = 2;
    return nl_n + reserved_rows < p.rows;
}
1417
/// Writes the fully-rendered draw buffer to the progress terminal in one
/// call. Errors propagate to the caller rather than being handled here.
fn write(buf: []const u8) anyerror!void {
    try global_progress.terminal.writeAll(buf);
}
1421
/// Bytes of a partially-written IPC message that must still be flushed down
/// the pipe before the next complete message can be sent (see `writeIpc`).
var remaining_write_trash_bytes: usize = 0;
1423
/// Sends this process's serialized node tree to the parent process over the
/// nonblocking pipe `fd` as: 1-byte node count, then storage bytes, then
/// parent bytes, always little-endian. If only part of a message can be
/// written, the shortfall is recorded in `remaining_write_trash_bytes` and
/// padded out on a later call so the parent can re-synchronize framing.
/// Returns `error.BrokenPipe` on any unrecoverable write failure.
fn writeIpc(fd: posix.fd_t, serialized: Serialized) error{BrokenPipe}!void {
    // Byteswap if necessary to ensure little endian over the pipe. This is
    // needed because the parent or child process might be running in qemu.
    if (is_big_endian) for (serialized.storage) |*s| s.byteSwap();

    assert(serialized.parents.len == serialized.storage.len);
    const serialized_len: u8 = @intCast(serialized.parents.len);
    const header = std.mem.asBytes(&serialized_len);
    const storage = std.mem.sliceAsBytes(serialized.storage);
    const parents = std.mem.sliceAsBytes(serialized.parents);

    var vecs: [3]posix.iovec_const = .{
        .{ .base = header.ptr, .len = header.len },
        .{ .base = storage.ptr, .len = storage.len },
        .{ .base = parents.ptr, .len = parents.len },
    };

    // Ensures the packet can fit in the pipe buffer.
    const upper_bound_msg_len = 1 + node_storage_buffer_len * @sizeOf(Node.Storage) +
        node_storage_buffer_len * @sizeOf(Node.OptionalIndex);
    comptime assert(upper_bound_msg_len <= 4096);

    // Flush filler bytes for any previously-truncated message first; the
    // content is irrelevant (the parent discards it), `parents` is just a
    // convenient buffer of the right size.
    while (remaining_write_trash_bytes > 0) {
        // We do this in a separate write call to give a better chance for the
        // writev below to be in a single packet.
        const n = @min(parents.len, remaining_write_trash_bytes);
        if (posix.write(fd, parents[0..n])) |written| {
            remaining_write_trash_bytes -= written;
            continue;
        } else |err| switch (err) {
            error.WouldBlock => return,
            error.BrokenPipe => return error.BrokenPipe,
            else => |e| {
                std.log.debug("failed to send progress to parent process: {s}", .{@errorName(e)});
                return error.BrokenPipe;
            },
        }
    }

    // If this write would block we do not want to keep trying, but we need to
    // know if a partial message was written.
    if (writevNonblock(fd, &vecs)) |written| {
        const total = header.len + storage.len + parents.len;
        if (written < total) {
            remaining_write_trash_bytes = total - written;
        }
    } else |err| switch (err) {
        error.WouldBlock => {},
        error.BrokenPipe => return error.BrokenPipe,
        else => |e| {
            std.log.debug("failed to send progress to parent process: {s}", .{@errorName(e)});
            return error.BrokenPipe;
        },
    }
}
1479
/// Calls `posix.writev` repeatedly until every iovec is fully written or a
/// write makes no progress, advancing `iov` in place past already-sent data.
/// Returns the total bytes written; `error.WouldBlock` from a nonblocking fd
/// propagates to the caller (which may have seen a partial write).
fn writevNonblock(fd: posix.fd_t, iov: []posix.iovec_const) posix.WriteError!usize {
    var iov_index: usize = 0;
    var written: usize = 0;
    var total_written: usize = 0;
    while (true) {
        // Skip iovecs fully covered by the previous writev; once all are
        // covered, the whole message has been sent.
        while (if (iov_index < iov.len)
            written >= iov[iov_index].len
        else
            return total_written) : (iov_index += 1) written -= iov[iov_index].len;
        // Trim the partially-written iovec so the next writev resumes exactly
        // where the previous one stopped.
        iov[iov_index].base += written;
        iov[iov_index].len -= written;
        written = try posix.writev(fd, iov[iov_index..]);
        if (written == 0) return total_written;
        total_written += written;
    }
}
1496
/// Re-queries the terminal dimensions and stores them into
/// `global_progress.rows`/`.cols`. No-op unless `resize_flag` is set; falls
/// back to a conservative 80x25 guess when the size cannot be determined.
fn maybeUpdateSize(resize_flag: bool) void {
    if (!resize_flag) return;

    const handle = global_progress.terminal.handle;

    if (is_windows) {
        var screen_info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined;

        if (windows.kernel32.GetConsoleScreenBufferInfo(handle, &screen_info) != windows.FALSE) {
            // In the old Windows console, dwSize.Y spans the whole scrollback
            // buffer, so derive the height from the visible window instead.
            const visible_height = screen_info.srWindow.Bottom - screen_info.srWindow.Top;
            global_progress.rows = @intCast(visible_height);
            global_progress.cols = @intCast(screen_info.dwSize.X);
        } else {
            std.log.debug("failed to determine terminal size; using conservative guess 80x25", .{});
            global_progress.rows = 25;
            global_progress.cols = 80;
        }
    } else {
        var ws: posix.winsize = .{
            .row = 0,
            .col = 0,
            .xpixel = 0,
            .ypixel = 0,
        };

        const rc = posix.system.ioctl(handle, posix.T.IOCGWINSZ, @intFromPtr(&ws));
        if (posix.errno(rc) == .SUCCESS) {
            global_progress.rows = ws.row;
            global_progress.cols = ws.col;
        } else {
            std.log.debug("failed to determine terminal size; using conservative guess 80x25", .{});
            global_progress.rows = 25;
            global_progress.cols = 80;
        }
    }
}
1536
/// SIGWINCH (terminal resize) handler. Does the minimum work possible in
/// signal context: setting `redraw_event` wakes the update thread, which
/// performs the actual redraw (see also `redraw_event`'s doc comment).
fn handleSigWinch(sig: posix.SIG, info: *const posix.siginfo_t, ctx_ptr: ?*anyopaque) callconv(.c) void {
    _ = info;
    _ = ctx_ptr;
    assert(sig == .WINCH);
    global_progress.redraw_event.set();
}
1543
/// Whether the target OS is known to deliver SIGWINCH on terminal resize,
/// making `handleSigWinch` usable there.
const have_sigwinch = switch (builtin.os.tag) {
    .linux,
    .plan9,
    .illumos,
    .netbsd,
    .openbsd,
    .haiku,
    .driverkit,
    .ios,
    .maccatalyst,
    .macos,
    .tvos,
    .visionos,
    .watchos,
    .dragonfly,
    .freebsd,
    .serenity,
    => true,

    else => false,
};
1565
/// The primary motivation for recursive mutex here is so that a panic while
/// stderr mutex is held still dumps the stack trace and other debug
/// information.
// NOTE(review): presumably this guards stderr output against concurrent
// progress redraws — confirm against the lock/unlock sites, which are not
// visible in this part of the file.
var stderr_mutex = std.Thread.Mutex.Recursive.init;
1570
/// Copies `src` into `dest` (lengths must match) using atomic stores:
/// usize-wide chunks first, then the byte remainder. A concurrent
/// `copyAtomicLoad` may observe a mix of old and new contents, but each
/// usize-aligned word is written atomically, so no word is torn.
fn copyAtomicStore(dest: []align(@alignOf(usize)) u8, src: []const u8) void {
    assert(dest.len == src.len);
    const chunked_len = dest.len / @sizeOf(usize);
    const dest_chunked: []usize = @as([*]usize, @ptrCast(dest))[0..chunked_len];
    // `src` has no alignment guarantee, hence the align(1) view.
    const src_chunked: []align(1) const usize = @as([*]align(1) const usize, @ptrCast(src))[0..chunked_len];
    for (dest_chunked, src_chunked) |*d, s| {
        @atomicStore(usize, d, s, .monotonic);
    }
    // Tail bytes that do not fill a whole usize.
    const remainder_start = chunked_len * @sizeOf(usize);
    for (dest[remainder_start..], src[remainder_start..]) |*d, s| {
        @atomicStore(u8, d, s, .monotonic);
    }
}
1584
/// Loads a node name via atomic usize-wide loads; counterpart to
/// `copyAtomicStore`. `@divExact` enforces at comptime that the name length
/// is a whole number of words, so there is no byte remainder to handle.
fn copyAtomicLoad(
    dest: *align(@alignOf(usize)) [Node.max_name_len]u8,
    src: *align(@alignOf(usize)) const [Node.max_name_len]u8,
) void {
    const chunked_len = @divExact(dest.len, @sizeOf(usize));
    const dest_chunked: *[chunked_len]usize = @ptrCast(dest);
    const src_chunked: *const [chunked_len]usize = @ptrCast(src);
    for (dest_chunked, src_chunked) |*d, *s| {
        d.* = @atomicLoad(usize, s, .monotonic);
    }
}