//! Represents one independent job whose responsibility is to:
//!
//! 1. Check the global zig package cache to see if the hash already exists.
//!    If so, load, parse, and validate the build.zig.zon file therein, and
//!    goto step 8. Likewise if the location is a relative path, treat this
//!    the same as a cache hit. Otherwise, proceed.
//! 2. Fetch and unpack a URL into a temporary directory.
//! 3. Load, parse, and validate the build.zig.zon file therein. It is allowed
//!    for the file to be missing, in which case this fetched package is considered
//!    to be a "naked" package.
//! 4. Apply inclusion rules of the build.zig.zon to the temporary directory by
//!    deleting excluded files. If any errors occurred for files that were
//!    ultimately excluded, those errors should be ignored, such as failure to
//!    create symlinks that weren't supposed to be included anyway.
//! 5. Compute the package hash based on the remaining files in the temporary
//!    directory.
//! 6. Rename the temporary directory into the global zig package cache
//!    directory. If the hash already exists, delete the temporary directory and
//!    leave the zig package cache directory untouched as it may be in use by the
//!    system. This is done even if the hash is invalid, in case the package with
//!    the different hash is used in the future.
//! 7. Validate the computed hash against the expected hash. If invalid,
//!    this job is done.
//! 8. Spawn a new fetch job for each dependency in the manifest file. Use
//!    a mutex and a hash map so that redundant jobs do not get queued up.
//!
//! All of this must be done with only referring to the state inside this struct
//! because this work will be done in a dedicated thread.
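//!
//! A rough driving sketch (illustrative only; field initialization is elided,
//! and the real callers are the CLI and the build runner):
//!
//!     var fetch: Fetch = .{ ... }; // populate the input fields listed below
//!     defer fetch.deinit();
//!     fetch.run() catch |err| switch (err) {
//!         error.FetchFailed => {}, // diagnostics are in `fetch.error_bundle`
//!         else => |e| return e,
//!     };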
const Fetch = @This();

const builtin = @import("builtin");
const native_os = builtin.os.tag;

const std = @import("std");
const Io = std.Io;
const fs = std.fs;
const assert = std.debug.assert;
const ascii = std.ascii;
const Allocator = std.mem.Allocator;
const Cache = std.Build.Cache;
const git = @import("Fetch/git.zig");
const Package = @import("../Package.zig");
const Manifest = Package.Manifest;
const ErrorBundle = std.zig.ErrorBundle;

arena: std.heap.ArenaAllocator,
location: Location,
location_tok: std.zig.Ast.TokenIndex,
hash_tok: std.zig.Ast.OptionalTokenIndex,
name_tok: std.zig.Ast.TokenIndex,
lazy_status: LazyStatus,
parent_package_root: Cache.Path,
parent_manifest_ast: ?*const std.zig.Ast,
prog_node: std.Progress.Node,
job_queue: *JobQueue,
/// If true, don't add an error for a missing hash. This flag is not passed
/// down to recursive dependencies. It's intended to be used only by the CLI.
omit_missing_hash_error: bool,
/// If true, don't fail when a manifest file is missing the `paths` field,
/// which specifies inclusion rules. This is intended to be true for the first
/// fetch task and false for the recursive dependencies.
allow_missing_paths_field: bool,
allow_missing_fingerprint: bool,
allow_name_string: bool,
/// If true and the URL points to a Git repository, the latest commit is used.
use_latest_commit: bool,

// Above this are fields provided as inputs to `run`.
// Below this are fields populated by `run`.

/// This will either be relative to `global_cache`, or to the build root of
/// the root package.
package_root: Cache.Path,
error_bundle: ErrorBundle.Wip,
manifest: ?Manifest,
manifest_ast: std.zig.Ast,
computed_hash: ComputedHash,
/// Fetch logic notices whether a package has a build.zig file and sets this flag.
has_build_zig: bool,
/// Indicates whether the task aborted due to an out-of-memory condition.
oom_flag: bool,
/// If `use_latest_commit` was true, this will be set to the commit that was used.
/// If the resource pointed to by the location is not a Git repository, this
/// will be left unchanged.
latest_commit: ?git.Oid,

// This field is used by the CLI only, untouched by this file.

/// The module for this `Fetch` task's package, which exposes `build.zig` as
/// the root source file.
module: ?*Package.Module,

pub const LazyStatus = enum {
    /// Not lazy.
    eager,
    /// Lazy, found.
    available,
    /// Lazy, not found.
    unavailable,
};

/// Contains shared state among all `Fetch` tasks.
pub const JobQueue = struct {
    io: Io,
    mutex: Io.Mutex = .init,
    /// It's an array hash map so that it can be sorted before rendering the
    /// dependencies.zig source file.
    /// Protected by `mutex`.
    table: Table = .{},
    /// `table` may be missing some tasks such as ones that failed, so this
    /// field contains references to all of them.
    /// Protected by `mutex`.
    all_fetches: std.ArrayList(*Fetch) = .empty,

    http_client: *std.http.Client,
    group: Io.Group = .init,
    global_cache: Cache.Directory,
    /// If true, no fetching occurs, and:
    /// * The `global_cache` directory is assumed to be the direct parent
    ///   directory of on-disk packages rather than having the "p/" directory
    ///   prefix inside of it.
    /// * An error occurs if any non-lazy packages are not already present in
    ///   the package cache directory.
    /// * A missing hash field causes an error, and since no fetching occurs,
    ///   the correct hash is not printed like usual.
    read_only: bool,
    recursive: bool,
    /// Dumps hash information to stdout which can be used to troubleshoot why
    /// two hashes of the same package do not match.
    /// If this is true, `recursive` must be false.
    debug_hash: bool,
    work_around_btrfs_bug: bool,
    mode: Mode,
    /// Set of hashes that will be additionally fetched even if they are marked
    /// as lazy.
    unlazy_set: UnlazySet = .{},

    pub const Mode = enum {
        /// Non-lazy dependencies are always fetched.
        /// Lazy dependencies are fetched only when needed.
        needed,
        /// Both non-lazy and lazy dependencies are always fetched.
        all,
    };
    pub const Table = std.AutoArrayHashMapUnmanaged(Package.Hash, *Fetch);
    pub const UnlazySet = std.AutoArrayHashMapUnmanaged(Package.Hash, void);

    pub fn deinit(jq: *JobQueue) void {
        if (jq.all_fetches.items.len == 0) return;
        const gpa = jq.all_fetches.items[0].arena.child_allocator;
        jq.table.deinit(gpa);
        // These must be deinitialized in reverse order because subsequent
        // `Fetch` instances are allocated in prior ones' arenas.
        // Sorry, I know it's a bit weird, but it slightly simplifies the
        // critical section.
        while (jq.all_fetches.pop()) |f| f.deinit();
        jq.all_fetches.deinit(gpa);
        jq.* = undefined;
    }

    /// Dumps all subsequent error bundles into the first one.
    pub fn consolidateErrors(jq: *JobQueue) !void {
        const root = &jq.all_fetches.items[0].error_bundle;
        const gpa = root.gpa;
        for (jq.all_fetches.items[1..]) |fetch| {
            if (fetch.error_bundle.root_list.items.len > 0) {
                var bundle = try fetch.error_bundle.toOwnedBundle("");
                defer bundle.deinit(gpa);
                try root.addBundleAsRoots(bundle);
            }
        }
    }

    /// Creates the dependencies.zig source code for the build runner to obtain
    /// via `@import("@dependencies")`.
    pub fn createDependenciesSource(jq: *JobQueue, buf: *std.array_list.Managed(u8)) Allocator.Error!void {
        const keys = jq.table.keys();

        assert(keys.len != 0); // caller should have added the first one
        if (keys.len == 1) {
            // This is the first one. It must have no dependencies.
            return createEmptyDependenciesSource(buf);
        }

        try buf.appendSlice("pub const packages = struct {\n");

        // Ensure the generated .zig file is deterministic.
        jq.table.sortUnstable(@as(struct {
            keys: []const Package.Hash,
            pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
                return std.mem.lessThan(u8, &ctx.keys[a_index].bytes, &ctx.keys[b_index].bytes);
            }
        }, .{ .keys = keys }));

        for (keys, jq.table.values()) |*hash, fetch| {
            if (fetch == jq.all_fetches.items[0]) {
                // The first one is a dummy package for the current project.
                continue;
            }

            const hash_slice = hash.toSlice();

            try buf.print(
                \\    pub const {f} = struct {{
                \\
            , .{std.zig.fmtId(hash_slice)});

            lazy: {
                switch (fetch.lazy_status) {
                    .eager => break :lazy,
                    .available => {
                        try buf.appendSlice(
                            \\        pub const available = true;
                            \\
                        );
                        break :lazy;
                    },
                    .unavailable => {
                        try buf.appendSlice(
                            \\        pub const available = false;
                            \\    };
                            \\
                        );
                        continue;
                    },
                }
            }

            try buf.print(
                \\        pub const build_root = "{f}";
                \\
            , .{std.fmt.alt(fetch.package_root, .formatEscapeString)});

            if (fetch.has_build_zig) {
                try buf.print(
                    \\        pub const build_zig = @import("{f}");
                    \\
                , .{std.zig.fmtString(hash_slice)});
            }

            if (fetch.manifest) |*manifest| {
                try buf.appendSlice(
                    \\        pub const deps: []const struct { []const u8, []const u8 } = &.{
                    \\
                );
                for (manifest.dependencies.keys(), manifest.dependencies.values()) |name, dep| {
                    const h = depDigest(fetch.package_root, jq.global_cache, dep) orelse continue;
                    try buf.print(
                        "            .{{ \"{f}\", \"{f}\" }},\n",
                        .{ std.zig.fmtString(name), std.zig.fmtString(h.toSlice()) },
                    );
                }

                try buf.appendSlice(
                    \\        };
                    \\    };
                    \\
                );
            } else {
                try buf.appendSlice(
                    \\        pub const deps: []const struct { []const u8, []const u8 } = &.{};
                    \\    };
                    \\
                );
            }
        }

        try buf.appendSlice(
            \\};
            \\
            \\pub const root_deps: []const struct { []const u8, []const u8 } = &.{
            \\
        );

        const root_fetch = jq.all_fetches.items[0];
        const root_manifest = &root_fetch.manifest.?;

        for (root_manifest.dependencies.keys(), root_manifest.dependencies.values()) |name, dep| {
            const h = depDigest(root_fetch.package_root, jq.global_cache, dep) orelse continue;
            try buf.print(
                "    .{{ \"{f}\", \"{f}\" }},\n",
                .{ std.zig.fmtString(name), std.zig.fmtString(h.toSlice()) },
            );
        }
        try buf.appendSlice("};\n");
    }
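
    // For reference, the generated source has roughly this shape (the hash and
    // dependency names here are illustrative, not real output):
    //
    //     pub const packages = struct {
    //         pub const @"example-1.2.3-AAAA" = struct {
    //             pub const build_root = "/path/to/global/cache/p/example-1.2.3-AAAA";
    //             pub const build_zig = @import("example-1.2.3-AAAA");
    //             pub const deps: []const struct { []const u8, []const u8 } = &.{
    //                 .{ "dep_name", "dep_hash" },
    //             };
    //         };
    //     };
    //     pub const root_deps: []const struct { []const u8, []const u8 } = &.{
    //         .{ "example", "example-1.2.3-AAAA" },
    //     };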

    pub fn createEmptyDependenciesSource(buf: *std.array_list.Managed(u8)) Allocator.Error!void {
        try buf.appendSlice(
            \\pub const packages = struct {};
            \\pub const root_deps: []const struct { []const u8, []const u8 } = &.{};
            \\
        );
    }
};

pub const Location = union(enum) {
    remote: Remote,
    /// A directory found inside the parent package.
    relative_path: Cache.Path,
    /// Recursive Fetch tasks will never use this Location, but it may be
    /// passed in by the CLI. Indicates the file contents here should be copied
    /// into the global package cache. It may be a file relative to the cwd or
    /// absolute, in which case it should be treated exactly like a `file://`
    /// URL, or a directory, in which case it should be treated as an
    /// already-unpacked directory (but still needs to be copied into the
    /// global package cache and have inclusion rules applied).
    path_or_url: []const u8,

    pub const Remote = struct {
        url: []const u8,
        /// If this is null it means the user omitted the hash field from a dependency.
        /// It will be an error but the logic should still fetch and print the discovered hash.
        hash: ?Package.Hash,
    };
};
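
// Illustrative examples of each variant (values are hypothetical): `.remote`
// comes from a manifest entry such as `.url = "https://example.com/foo.tar.gz"`
// paired with its `.hash`; `.relative_path` comes from `.path = "libs/foo"`;
// and `.path_or_url` is what the CLI passes through for `zig fetch <path-or-url>`.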

pub const RunError = error{
    OutOfMemory,
    Canceled,
    /// This error code is intended to be handled by inspecting the
    /// `error_bundle` field.
    FetchFailed,
};

pub fn run(f: *Fetch) RunError!void {
    const io = f.job_queue.io;
    const eb = &f.error_bundle;
    const arena = f.arena.allocator();
    const gpa = f.arena.child_allocator;
    const cache_root = f.job_queue.global_cache;

    try eb.init(gpa);

    // Check the global zig package cache to see if the hash already exists. If
    // so, load, parse, and validate the build.zig.zon file therein, and skip
    // ahead to queuing up jobs for dependencies. Likewise if the location is a
    // relative path, treat this the same as a cache hit. Otherwise, proceed.

    const remote = switch (f.location) {
        .relative_path => |pkg_root| {
            if (fs.path.isAbsolute(pkg_root.sub_path)) return f.fail(
                f.location_tok,
                try eb.addString("expected path relative to build root; found absolute path"),
            );
            if (f.hash_tok.unwrap()) |hash_tok| return f.fail(
                hash_tok,
                try eb.addString("path-based dependencies are not hashed"),
            );
            // Packages fetched by URL may not use relative paths to escape outside the
            // fetched package directory from within the package cache.
            if (pkg_root.root_dir.eql(cache_root)) {
                // `parent_package_root.sub_path` contains a path like this:
                // "p/$hash", or
                // "p/$hash/foo", with possibly more directories after "foo".
                // We want to fail unless the resolved relative path has a
                // prefix of "p/$hash/".
                const prefix_len: usize = if (f.job_queue.read_only) 0 else "p/".len;
                const parent_sub_path = f.parent_package_root.sub_path;
                const end = find_end: {
                    if (parent_sub_path.len > prefix_len) {
                        // Use `isSep` instead of `indexOfScalarPos` to account for
                        // Windows accepting both `\` and `/` as path separators.
                        for (parent_sub_path[prefix_len..], prefix_len..) |c, i| {
                            if (std.fs.path.isSep(c)) break :find_end i;
                        }
                    }
                    break :find_end parent_sub_path.len;
                };
                const expected_prefix = parent_sub_path[0..end];
                if (!std.mem.startsWith(u8, pkg_root.sub_path, expected_prefix)) {
                    return f.fail(
                        f.location_tok,
                        try eb.printString("dependency path outside project: '{f}'", .{pkg_root}),
                    );
                }
            }
            f.package_root = pkg_root;
            try loadManifest(f, pkg_root);
            if (!f.has_build_zig) try checkBuildFileExistence(f);
            if (!f.job_queue.recursive) return;
            return queueJobsForDeps(f);
        },
        .remote => |remote| remote,
        .path_or_url => |path_or_url| {
            if (fs.cwd().openDir(path_or_url, .{ .iterate = true })) |dir| {
                var resource: Resource = .{ .dir = dir };
                return f.runResource(path_or_url, &resource, null);
            } else |dir_err| {
                var server_header_buffer: [init_resource_buffer_size]u8 = undefined;

                const file_err = if (dir_err == error.NotDir) e: {
                    if (fs.cwd().openFile(path_or_url, .{})) |file| {
                        var resource: Resource = .{ .file = file.reader(io, &server_header_buffer) };
                        return f.runResource(path_or_url, &resource, null);
                    } else |err| break :e err;
                } else dir_err;

                const uri = std.Uri.parse(path_or_url) catch |uri_err| {
                    return f.fail(0, try eb.printString(
                        "'{s}' could not be recognized as a file path ({t}) or a URL ({t})",
                        .{ path_or_url, file_err, uri_err },
                    ));
                };
                var resource: Resource = undefined;
                try f.initResource(uri, &resource, &server_header_buffer);
                return f.runResource(try uri.path.toRawMaybeAlloc(arena), &resource, null);
            }
        },
    };

    if (remote.hash) |expected_hash| {
        var prefixed_pkg_sub_path_buffer: [Package.Hash.max_len + 2]u8 = undefined;
        prefixed_pkg_sub_path_buffer[0] = 'p';
        prefixed_pkg_sub_path_buffer[1] = fs.path.sep;
        const hash_slice = expected_hash.toSlice();
        @memcpy(prefixed_pkg_sub_path_buffer[2..][0..hash_slice.len], hash_slice);
        const prefixed_pkg_sub_path = prefixed_pkg_sub_path_buffer[0 .. 2 + hash_slice.len];
        const prefix_len: usize = if (f.job_queue.read_only) "p/".len else 0;
        const pkg_sub_path = prefixed_pkg_sub_path[prefix_len..];
        if (cache_root.handle.access(pkg_sub_path, .{})) |_| {
            assert(f.lazy_status != .unavailable);
            f.package_root = .{
                .root_dir = cache_root,
                .sub_path = try arena.dupe(u8, pkg_sub_path),
            };
            try loadManifest(f, f.package_root);
            try checkBuildFileExistence(f);
            if (!f.job_queue.recursive) return;
            return queueJobsForDeps(f);
        } else |err| switch (err) {
            error.FileNotFound => {
                switch (f.lazy_status) {
                    .eager => {},
                    .available => if (!f.job_queue.unlazy_set.contains(expected_hash)) {
                        f.lazy_status = .unavailable;
                        return;
                    },
                    .unavailable => unreachable,
                }
                if (f.job_queue.read_only) return f.fail(
                    f.name_tok,
                    try eb.printString("package not found at '{f}{s}'", .{
                        cache_root, pkg_sub_path,
                    }),
                );
            },
            else => |e| {
                try eb.addRootErrorMessage(.{
                    .msg = try eb.printString("unable to open global package cache directory '{f}{s}': {s}", .{
                        cache_root, pkg_sub_path, @errorName(e),
                    }),
                });
                return error.FetchFailed;
            },
        }
    } else if (f.job_queue.read_only) {
        try eb.addRootErrorMessage(.{
            .msg = try eb.addString("dependency is missing hash field"),
            .src_loc = try f.srcLoc(f.location_tok),
        });
        return error.FetchFailed;
    }

    // Fetch and unpack the remote into a temporary directory.

    const uri = std.Uri.parse(remote.url) catch |err| return f.fail(
        f.location_tok,
        try eb.printString("invalid URI: {s}", .{@errorName(err)}),
    );
    var buffer: [init_resource_buffer_size]u8 = undefined;
    var resource: Resource = undefined;
    try f.initResource(uri, &resource, &buffer);
    return f.runResource(try uri.path.toRawMaybeAlloc(arena), &resource, remote.hash);
}

pub fn deinit(f: *Fetch) void {
    f.error_bundle.deinit();
    f.arena.deinit();
}

/// Consumes `resource`, even if an error is returned.
fn runResource(
    f: *Fetch,
    uri_path: []const u8,
    resource: *Resource,
    remote_hash: ?Package.Hash,
) RunError!void {
    const io = f.job_queue.io;
    defer resource.deinit(io);
    const arena = f.arena.allocator();
    const eb = &f.error_bundle;
    const s = fs.path.sep_str;
    const cache_root = f.job_queue.global_cache;
    const rand_int = std.crypto.random.int(u64);
    const tmp_dir_sub_path = "tmp" ++ s ++ std.fmt.hex(rand_int);

    const package_sub_path = blk: {
        const tmp_directory_path = try cache_root.join(arena, &.{tmp_dir_sub_path});
        var tmp_directory: Cache.Directory = .{
            .path = tmp_directory_path,
            .handle = handle: {
                const dir = cache_root.handle.makeOpenPath(tmp_dir_sub_path, .{
                    .iterate = true,
                }) catch |err| {
                    try eb.addRootErrorMessage(.{
                        .msg = try eb.printString("unable to create temporary directory '{s}': {s}", .{
                            tmp_directory_path, @errorName(err),
                        }),
                    });
                    return error.FetchFailed;
                };
                break :handle dir;
            },
        };
        defer tmp_directory.handle.close();

        // Fetch and unpack a resource into a temporary directory.
        var unpack_result = try unpackResource(f, resource, uri_path, tmp_directory);

        var pkg_path: Cache.Path = .{ .root_dir = tmp_directory, .sub_path = unpack_result.root_dir };

        // Apply btrfs workaround if needed. Reopen tmp_directory.
        if (native_os == .linux and f.job_queue.work_around_btrfs_bug) {
            // https://github.com/ziglang/zig/issues/17095
            pkg_path.root_dir.handle.close();
            pkg_path.root_dir.handle = cache_root.handle.makeOpenPath(tmp_dir_sub_path, .{
                .iterate = true,
            }) catch @panic("btrfs workaround failed");
        }

        // Load, parse, and validate the unpacked build.zig.zon file. It is allowed
        // for the file to be missing, in which case this fetched package is
        // considered to be a "naked" package.
        try loadManifest(f, pkg_path);

        const filter: Filter = .{
            .include_paths = if (f.manifest) |m| m.paths else .{},
        };

        // Ignore errors for files that were excluded by the manifest, such as
        // failure to create symlinks that weren't supposed to be included anyway.
        try unpack_result.validate(f, filter);

        // Apply the manifest's inclusion rules to the temporary directory by
        // deleting excluded files.
        // Empty directories have already been omitted by `unpackResource`.
        // Compute the package hash based on the remaining files in the temporary
        // directory.
        f.computed_hash = try computeHash(f, pkg_path, filter);

        break :blk if (unpack_result.root_dir.len > 0)
            try fs.path.join(arena, &.{ tmp_dir_sub_path, unpack_result.root_dir })
        else
            tmp_dir_sub_path;
    };

    const computed_package_hash = computedPackageHash(f);

    // Rename the temporary directory into the global zig package cache
    // directory. If the hash already exists, delete the temporary directory
    // and leave the zig package cache directory untouched as it may be in use
    // by the system. This is done even if the hash is invalid, in case the
    // package with the different hash is used in the future.

    f.package_root = .{
        .root_dir = cache_root,
        .sub_path = try std.fmt.allocPrint(arena, "p" ++ s ++ "{s}", .{computed_package_hash.toSlice()}),
    };
    renameTmpIntoCache(cache_root.handle, package_sub_path, f.package_root.sub_path) catch |err| {
        const src = try cache_root.join(arena, &.{tmp_dir_sub_path});
        const dest = try cache_root.join(arena, &.{f.package_root.sub_path});
        try eb.addRootErrorMessage(.{ .msg = try eb.printString(
            "unable to rename temporary directory '{s}' into package cache directory '{s}': {s}",
            .{ src, dest, @errorName(err) },
        ) });
        return error.FetchFailed;
    };
    // Remove temporary directory root if not already renamed to global cache.
    if (!std.mem.eql(u8, package_sub_path, tmp_dir_sub_path)) {
        cache_root.handle.deleteDir(tmp_dir_sub_path) catch {};
    }

    // Validate the computed hash against the expected hash. If invalid, this
    // job is done.

    if (remote_hash) |declared_hash| {
        const hash_tok = f.hash_tok.unwrap().?;
        if (declared_hash.isOld()) {
            const actual_hex = Package.multiHashHexDigest(f.computed_hash.digest);
            if (!std.mem.eql(u8, declared_hash.toSlice(), &actual_hex)) {
                return f.fail(hash_tok, try eb.printString(
                    "hash mismatch: manifest declares '{s}' but the fetched package has '{s}'",
                    .{ declared_hash.toSlice(), actual_hex },
                ));
            }
        } else {
            if (!computed_package_hash.eql(&declared_hash)) {
                return f.fail(hash_tok, try eb.printString(
                    "hash mismatch: manifest declares '{s}' but the fetched package has '{s}'",
                    .{ declared_hash.toSlice(), computed_package_hash.toSlice() },
                ));
            }
        }
    } else if (!f.omit_missing_hash_error) {
        const notes_len = 1;
        try eb.addRootErrorMessage(.{
            .msg = try eb.addString("dependency is missing hash field"),
            .src_loc = try f.srcLoc(f.location_tok),
            .notes_len = notes_len,
        });
        const notes_start = try eb.reserveNotes(notes_len);
        eb.extra.items[notes_start] = @intFromEnum(try eb.addErrorMessage(.{
            .msg = try eb.printString("expected .hash = \"{s}\",", .{computed_package_hash.toSlice()}),
        }));
        return error.FetchFailed;
    }

    // Spawn a new fetch job for each dependency in the manifest file. Use
    // a mutex and a hash map so that redundant jobs do not get queued up.
    if (!f.job_queue.recursive) return;
    return queueJobsForDeps(f);
}

pub fn computedPackageHash(f: *const Fetch) Package.Hash {
    const saturated_size = std.math.cast(u32, f.computed_hash.total_size) orelse std.math.maxInt(u32);
    if (f.manifest) |man| {
        var version_buffer: [32]u8 = undefined;
        const version: []const u8 = std.fmt.bufPrint(&version_buffer, "{f}", .{man.version}) catch &version_buffer;
        return .init(f.computed_hash.digest, man.name, version, man.id, saturated_size);
    }
    // In the future build.zig.zon fields will be added to allow overriding these values
    // for naked tarballs.
    return .init(f.computed_hash.digest, "N", "V", 0xffff, saturated_size);
}
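
// For reference, a rendered hash has the shape "{name}-{version}-{payload}", so
// a naked tarball yields something like "N-V-..." (payload illustrative; the
// exact encoding is defined by `Package.Hash.init`).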

/// `computeHash` gets a free check for the existence of `build.zig`, but when
/// not computing a hash, we need to do a syscall to check for it.
fn checkBuildFileExistence(f: *Fetch) RunError!void {
    const eb = &f.error_bundle;
    if (f.package_root.access(Package.build_zig_basename, .{})) |_| {
        f.has_build_zig = true;
    } else |err| switch (err) {
        error.FileNotFound => {},
        else => |e| {
            try eb.addRootErrorMessage(.{
                .msg = try eb.printString("unable to access '{f}{s}': {s}", .{
                    f.package_root, Package.build_zig_basename, @errorName(e),
                }),
            });
            return error.FetchFailed;
        },
    }
}

/// This function populates `f.manifest` or leaves it `null`.
fn loadManifest(f: *Fetch, pkg_root: Cache.Path) RunError!void {
    const eb = &f.error_bundle;
    const arena = f.arena.allocator();
    const manifest_bytes = pkg_root.root_dir.handle.readFileAllocOptions(
        try fs.path.join(arena, &.{ pkg_root.sub_path, Manifest.basename }),
        arena,
        .limited(Manifest.max_bytes),
        .@"1",
        0,
    ) catch |err| switch (err) {
        error.FileNotFound => return,
        else => |e| {
            const file_path = try pkg_root.join(arena, Manifest.basename);
            try eb.addRootErrorMessage(.{
                .msg = try eb.printString("unable to load package manifest '{f}': {s}", .{
                    file_path, @errorName(e),
                }),
            });
            return error.FetchFailed;
        },
    };

    const ast = &f.manifest_ast;
    ast.* = try std.zig.Ast.parse(arena, manifest_bytes, .zon);

    if (ast.errors.len > 0) {
        const file_path = try std.fmt.allocPrint(arena, "{f}" ++ fs.path.sep_str ++ Manifest.basename, .{pkg_root});
        try std.zig.putAstErrorsIntoBundle(arena, ast.*, file_path, eb);
        return error.FetchFailed;
    }

    f.manifest = try Manifest.parse(arena, ast.*, .{
        .allow_missing_paths_field = f.allow_missing_paths_field,
        .allow_missing_fingerprint = f.allow_missing_fingerprint,
        .allow_name_string = f.allow_name_string,
    });
    const manifest = &f.manifest.?;

    if (manifest.errors.len > 0) {
        const src_path = try eb.printString("{f}" ++ fs.path.sep_str ++ "{s}", .{ pkg_root, Manifest.basename });
        try manifest.copyErrorsIntoBundle(ast.*, src_path, eb);
        return error.FetchFailed;
    }
}

fn queueJobsForDeps(f: *Fetch) RunError!void {
    const io = f.job_queue.io;

    assert(f.job_queue.recursive);

    // If the package does not have a build.zig.zon file then there are no dependencies.
    const manifest = f.manifest orelse return;

    const new_fetches, const prog_names = nf: {
        const parent_arena = f.arena.allocator();
        const gpa = f.arena.child_allocator;
        const cache_root = f.job_queue.global_cache;
        const dep_names = manifest.dependencies.keys();
        const deps = manifest.dependencies.values();
        // Grab the new tasks into a temporary buffer so we can unlock the mutex
        // as fast as possible.
        // This over-allocates by the number of fetches that get skipped by the
        // `continue` in the loop below.
        const new_fetches = try parent_arena.alloc(Fetch, deps.len);
        const prog_names = try parent_arena.alloc([]const u8, deps.len);
        var new_fetch_index: usize = 0;

        try f.job_queue.mutex.lock(io);
        defer f.job_queue.mutex.unlock(io);

        try f.job_queue.all_fetches.ensureUnusedCapacity(gpa, new_fetches.len);
        try f.job_queue.table.ensureUnusedCapacity(gpa, @intCast(new_fetches.len));

        // There are four cases here:
        // * Correct hash is provided by manifest.
        //   - Hash map already has the entry, no need to add it again.
        // * Incorrect hash is provided by manifest.
        //   - Hash mismatch error emitted; `queueJobsForDeps` is not called.
        // * Hash is not provided by manifest.
        //   - Hash missing error emitted; `queueJobsForDeps` is not called.
        // * path-based location is used without a hash.
        //   - Hash is added to the table based on the path alone before
        //     calling run(); no need to add it again.
        //
        // If we add a dep as lazy and then later try to add the same dep as eager,
        // eagerness takes precedence and the existing entry is updated and re-scheduled
        // for fetching.

        for (dep_names, deps) |dep_name, dep| {
            var promoted_existing_to_eager = false;
            const new_fetch = &new_fetches[new_fetch_index];
            const location: Location = switch (dep.location) {
                .url => |url| .{
                    .remote = .{
                        .url = url,
                        .hash = h: {
                            const h = dep.hash orelse break :h null;
                            const pkg_hash: Package.Hash = .fromSlice(h);
                            if (h.len == 0) break :h pkg_hash;
                            const gop = f.job_queue.table.getOrPutAssumeCapacity(pkg_hash);
                            if (gop.found_existing) {
                                if (!dep.lazy and gop.value_ptr.*.lazy_status != .eager) {
                                    gop.value_ptr.*.lazy_status = .eager;
                                    promoted_existing_to_eager = true;
                                } else {
                                    continue;
                                }
                            }
                            gop.value_ptr.* = new_fetch;
                            break :h pkg_hash;
                        },
                    },
                },
                .path => |rel_path| l: {
                    // This might produce an invalid path, which is checked for
                    // at the beginning of run().
                    const new_root = try f.package_root.resolvePosix(parent_arena, rel_path);
                    const pkg_hash = relativePathDigest(new_root, cache_root);
                    const gop = f.job_queue.table.getOrPutAssumeCapacity(pkg_hash);
                    if (gop.found_existing) {
                        if (!dep.lazy and gop.value_ptr.*.lazy_status != .eager) {
                            gop.value_ptr.*.lazy_status = .eager;
                            promoted_existing_to_eager = true;
                        } else {
                            continue;
                        }
                    }
                    gop.value_ptr.* = new_fetch;
                    break :l .{ .relative_path = new_root };
                },
            };
            prog_names[new_fetch_index] = dep_name;
            new_fetch_index += 1;
            if (!promoted_existing_to_eager) {
                f.job_queue.all_fetches.appendAssumeCapacity(new_fetch);
            }
            new_fetch.* = .{
                .arena = std.heap.ArenaAllocator.init(gpa),
                .location = location,
                .location_tok = dep.location_tok,
                .hash_tok = dep.hash_tok,
                .name_tok = dep.name_tok,
                .lazy_status = switch (f.job_queue.mode) {
                    .needed => if (dep.lazy) .available else .eager,
                    .all => .eager,
                },
                .parent_package_root = f.package_root,
                .parent_manifest_ast = &f.manifest_ast,
                .prog_node = f.prog_node,
                .job_queue = f.job_queue,
                .omit_missing_hash_error = false,
                .allow_missing_paths_field = true,
                .allow_missing_fingerprint = true,
                .allow_name_string = true,
                .use_latest_commit = false,

                .package_root = undefined,
                .error_bundle = undefined,
                .manifest = null,
                .manifest_ast = undefined,
                .computed_hash = undefined,
                .has_build_zig = false,
                .oom_flag = false,
                .latest_commit = null,

                .module = null,
            };
        }

        f.prog_node.increaseEstimatedTotalItems(new_fetch_index);

        break :nf .{ new_fetches[0..new_fetch_index], prog_names[0..new_fetch_index] };
    };

    // Now it's time to dispatch tasks.
    for (new_fetches, prog_names) |*new_fetch, prog_name| {
        f.job_queue.group.async(io, workerRun, .{ new_fetch, prog_name });
    }
}

pub fn relativePathDigest(pkg_root: Cache.Path, cache_root: Cache.Directory) Package.Hash {
    return .initPath(pkg_root.sub_path, pkg_root.root_dir.eql(cache_root));
}

pub fn workerRun(f: *Fetch, prog_name: []const u8) void {
    const prog_node = f.prog_node.start(prog_name, 0);
    defer prog_node.end();

    run(f) catch |err| switch (err) {
        error.OutOfMemory => f.oom_flag = true,
        error.Canceled => {},
        error.FetchFailed => {
            // Nothing to do because the errors are already reported in `error_bundle`,
            // and a reference is kept to the `Fetch` task inside `all_fetches`.
        },
    };
}

fn srcLoc(
    f: *Fetch,
    tok: std.zig.Ast.TokenIndex,
) Allocator.Error!ErrorBundle.SourceLocationIndex {
    const ast = f.parent_manifest_ast orelse return .none;
    const eb = &f.error_bundle;
    const start_loc = ast.tokenLocation(0, tok);
    const src_path = try eb.printString("{f}" ++ fs.path.sep_str ++ Manifest.basename, .{f.parent_package_root});
    const msg_off = 0;
    return eb.addSourceLocation(.{
        .src_path = src_path,
        .span_start = ast.tokenStart(tok),
        .span_end = @intCast(ast.tokenStart(tok) + ast.tokenSlice(tok).len),
        .span_main = ast.tokenStart(tok) + msg_off,
        .line = @intCast(start_loc.line),
        .column = @intCast(start_loc.column),
        .source_line = try eb.addString(ast.source[start_loc.line_start..start_loc.line_end]),
    });
}

fn fail(f: *Fetch, msg_tok: std.zig.Ast.TokenIndex, msg_str: u32) RunError {
    const eb = &f.error_bundle;
    try eb.addRootErrorMessage(.{
        .msg = msg_str,
        .src_loc = try f.srcLoc(msg_tok),
    });
    return error.FetchFailed;
}

const Resource = union(enum) {
    file: fs.File.Reader,
    http_request: HttpRequest,
    git: Git,
    dir: fs.Dir,

    const Git = struct {
        session: git.Session,
        fetch_stream: git.Session.FetchStream,
        want_oid: git.Oid,
    };

    const HttpRequest = struct {
        request: std.http.Client.Request,
        response: std.http.Client.Response,
        transfer_buffer: []u8,
        decompress: std.http.Decompress,
        decompress_buffer: []u8,
    };

    fn deinit(resource: *Resource, io: Io) void {
        switch (resource.*) {
            .file => |*file_reader| file_reader.file.close(io),
            .http_request => |*http_request| http_request.request.deinit(),
            .git => |*git_resource| {
                git_resource.fetch_stream.deinit();
            },
            .dir => |*dir| dir.close(),
        }
        resource.* = undefined;
    }

    fn reader(resource: *Resource) *Io.Reader {
        return switch (resource.*) {
            .file => |*file_reader| &file_reader.interface,
            .http_request => |*http_request| http_request.response.readerDecompressing(
                http_request.transfer_buffer,
                &http_request.decompress,
                http_request.decompress_buffer,
            ),
            .git => |*g| &g.fetch_stream.reader,
            .dir => unreachable,
        };
    }
};

const FileType = enum {
    tar,
    @"tar.gz",
    @"tar.xz",
    @"tar.zst",
    git_pack,
    zip,

    fn fromPath(file_path: []const u8) ?FileType {
        if (ascii.endsWithIgnoreCase(file_path, ".tar")) return .tar;
        if (ascii.endsWithIgnoreCase(file_path, ".tgz")) return .@"tar.gz";
        if (ascii.endsWithIgnoreCase(file_path, ".tar.gz")) return .@"tar.gz";
        if (ascii.endsWithIgnoreCase(file_path, ".txz")) return .@"tar.xz";
        if (ascii.endsWithIgnoreCase(file_path, ".tar.xz")) return .@"tar.xz";
        if (ascii.endsWithIgnoreCase(file_path, ".tzst")) return .@"tar.zst";
        if (ascii.endsWithIgnoreCase(file_path, ".tar.zst")) return .@"tar.zst";
        if (ascii.endsWithIgnoreCase(file_path, ".zip")) return .zip;
        if (ascii.endsWithIgnoreCase(file_path, ".jar")) return .zip;
        return null;
    }
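
    // A small sanity check (added for illustration, in the same spirit as the
    // `fromContentDisposition` test below); it exercises the suffix table
    // above, including case-insensitivity and the short forms.
    test fromPath {
        try std.testing.expectEqual(@as(?FileType, .tar), fromPath("pkg.tar"));
        try std.testing.expectEqual(@as(?FileType, .@"tar.gz"), fromPath("pkg.TAR.GZ"));
        try std.testing.expectEqual(@as(?FileType, .@"tar.gz"), fromPath("pkg.tgz"));
        try std.testing.expectEqual(@as(?FileType, .@"tar.xz"), fromPath("pkg.txz"));
        try std.testing.expectEqual(@as(?FileType, .@"tar.zst"), fromPath("pkg.tar.zst"));
        try std.testing.expectEqual(@as(?FileType, .zip), fromPath("lib.jar"));
        try std.testing.expect(fromPath("pkg.tar.bz2") == null);
    }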

    /// Parameter is a content-disposition header value.
    fn fromContentDisposition(cd_header: []const u8) ?FileType {
        const attach_end = ascii.indexOfIgnoreCase(cd_header, "attachment;") orelse
            return null;

        var value_start = ascii.indexOfIgnoreCasePos(cd_header, attach_end + 1, "filename") orelse
            return null;
        value_start += "filename".len;
        if (cd_header[value_start] == '*') {
            value_start += 1;
        }
        if (cd_header[value_start] != '=') return null;
        value_start += 1;

        var value_end = std.mem.indexOfPos(u8, cd_header, value_start, ";") orelse cd_header.len;
        if (cd_header[value_end - 1] == '\"') {
            value_end -= 1;
        }
        return fromPath(cd_header[value_start..value_end]);
    }

    test fromContentDisposition {
        try std.testing.expectEqual(@as(?FileType, .@"tar.gz"), fromContentDisposition("attaChment; FILENAME=\"stuff.tar.gz\"; size=42"));
        try std.testing.expectEqual(@as(?FileType, .@"tar.gz"), fromContentDisposition("attachment; filename*=\"stuff.tar.gz\""));
        try std.testing.expectEqual(@as(?FileType, .@"tar.xz"), fromContentDisposition("ATTACHMENT; filename=\"stuff.tar.xz\""));
        try std.testing.expectEqual(@as(?FileType, .@"tar.xz"), fromContentDisposition("attachment; FileName=\"stuff.tar.xz\""));
        try std.testing.expectEqual(@as(?FileType, .@"tar.gz"), fromContentDisposition("attachment; FileName*=UTF-8\'\'xyz%2Fstuff.tar.gz"));
        try std.testing.expectEqual(@as(?FileType, .tar), fromContentDisposition("attachment; FileName=\"stuff.tar\""));

        try std.testing.expect(fromContentDisposition("attachment FileName=\"stuff.tar.gz\"") == null);
        try std.testing.expect(fromContentDisposition("attachment; FileName\"stuff.gz\"") == null);
        try std.testing.expect(fromContentDisposition("attachment; size=42") == null);
        try std.testing.expect(fromContentDisposition("inline; size=42") == null);
        try std.testing.expect(fromContentDisposition("FileName=\"stuff.tar.gz\"; attachment;") == null);
        try std.testing.expect(fromContentDisposition("FileName=\"stuff.tar.gz\";") == null);
    }
};

const init_resource_buffer_size = git.Packet.max_data_length;

fn initResource(f: *Fetch, uri: std.Uri, resource: *Resource, reader_buffer: []u8) RunError!void {
    const io = f.job_queue.io;
    const arena = f.arena.allocator();
    const eb = &f.error_bundle;

    if (ascii.eqlIgnoreCase(uri.scheme, "file")) {
        const path = try uri.path.toRawMaybeAlloc(arena);
        const file = f.parent_package_root.openFile(path, .{}) catch |err| {
            return f.fail(f.location_tok, try eb.printString("unable to open '{f}{s}': {t}", .{
                f.parent_package_root, path, err,
            }));
        };
        resource.* = .{ .file = file.reader(io, reader_buffer) };
        return;
    }

    const http_client = f.job_queue.http_client;

    if (ascii.eqlIgnoreCase(uri.scheme, "http") or
        ascii.eqlIgnoreCase(uri.scheme, "https"))
    {
        resource.* = .{ .http_request = .{
            .request = http_client.request(.GET, uri, .{}) catch |err|
                return f.fail(f.location_tok, try eb.printString("unable to connect to server: {t}", .{err})),
            .response = undefined,
            .transfer_buffer = reader_buffer,
            .decompress_buffer = &.{},
            .decompress = undefined,
        } };
        const request = &resource.http_request.request;
        errdefer request.deinit();

        request.sendBodiless() catch |err|
            return f.fail(f.location_tok, try eb.printString("HTTP request failed: {t}", .{err}));

        var redirect_buffer: [1024]u8 = undefined;
        const response = &resource.http_request.response;
        response.* = request.receiveHead(&redirect_buffer) catch |err| switch (err) {
            error.ReadFailed => {
                return f.fail(f.location_tok, try eb.printString("HTTP response read failure: {t}", .{
                    request.connection.?.getReadError().?,
                }));
            },
            else => |e| return f.fail(f.location_tok, try eb.printString("invalid HTTP response: {t}", .{e})),
        };

        if (response.head.status != .ok) return f.fail(f.location_tok, try eb.printString(
            "bad HTTP response code: '{d} {s}'",
            .{ response.head.status, response.head.status.phrase() orelse "" },
        ));

        resource.http_request.decompress_buffer = try arena.alloc(u8, response.head.content_encoding.minBufferCapacity());
        return;
    }

    if (ascii.eqlIgnoreCase(uri.scheme, "git+http") or
        ascii.eqlIgnoreCase(uri.scheme, "git+https"))
    {
        var transport_uri = uri;
        transport_uri.scheme = uri.scheme["git+".len..];
        var session = git.Session.init(arena, http_client, transport_uri, reader_buffer) catch |err| {
            return f.fail(
                f.location_tok,
                try eb.printString("unable to discover remote git server capabilities: {t}", .{err}),
            );
        };

        const want_oid = want_oid: {
            const want_ref =
                if (uri.fragment) |fragment| try fragment.toRawMaybeAlloc(arena) else "HEAD";
            if (git.Oid.parseAny(want_ref)) |oid| break :want_oid oid else |_| {}

            const want_ref_head = try std.fmt.allocPrint(arena, "refs/heads/{s}", .{want_ref});
            const want_ref_tag = try std.fmt.allocPrint(arena, "refs/tags/{s}", .{want_ref});

            var ref_iterator: git.Session.RefIterator = undefined;
            session.listRefs(&ref_iterator, .{
                .ref_prefixes = &.{ want_ref, want_ref_head, want_ref_tag },
                .include_peeled = true,
                .buffer = reader_buffer,
            }) catch |err| return f.fail(f.location_tok, try eb.printString("unable to list refs: {t}", .{err}));
            defer ref_iterator.deinit();
            while (ref_iterator.next() catch |err| {
                return f.fail(f.location_tok, try eb.printString(
                    "unable to iterate refs: {s}",
                    .{@errorName(err)},
                ));
            }) |ref| {
                if (std.mem.eql(u8, ref.name, want_ref) or
                    std.mem.eql(u8, ref.name, want_ref_head) or
                    std.mem.eql(u8, ref.name, want_ref_tag))
                {
                    break :want_oid ref.peeled orelse ref.oid;
                }
            }
            return f.fail(f.location_tok, try eb.printString("ref not found: {s}", .{want_ref}));
        };
        if (f.use_latest_commit) {
            f.latest_commit = want_oid;
        } else if (uri.fragment == null) {
            const notes_len = 1;
            try eb.addRootErrorMessage(.{
                .msg = try eb.addString("url field is missing an explicit ref"),
                .src_loc = try f.srcLoc(f.location_tok),
                .notes_len = notes_len,
            });
            const notes_start = try eb.reserveNotes(notes_len);
            eb.extra.items[notes_start] = @intFromEnum(try eb.addErrorMessage(.{
                .msg = try eb.printString("try .url = \"{f}#{f}\",", .{
                    uri.fmt(.{ .scheme = true, .authority = true, .path = true }),
                    want_oid,
                }),
            }));
            return error.FetchFailed;
        }

        var want_oid_buf: [git.Oid.max_formatted_length]u8 = undefined;
        _ = std.fmt.bufPrint(&want_oid_buf, "{f}", .{want_oid}) catch unreachable;
        resource.* = .{ .git = .{
            .session = session,
            .fetch_stream = undefined,
            .want_oid = want_oid,
        } };
        const fetch_stream = &resource.git.fetch_stream;
        session.fetch(fetch_stream, &.{&want_oid_buf}, reader_buffer) catch |err| {
            return f.fail(f.location_tok, try eb.printString("unable to create fetch stream: {t}", .{err}));
        };
        errdefer fetch_stream.deinit();

        return;
    }

    return f.fail(f.location_tok, try eb.printString("unsupported URL scheme: {s}", .{uri.scheme}));
}

fn unpackResource(
    f: *Fetch,
    resource: *Resource,
    uri_path: []const u8,
    tmp_directory: Cache.Directory,
) RunError!UnpackResult {
    const eb = &f.error_bundle;
    const file_type = switch (resource.*) {
        .file => FileType.fromPath(uri_path) orelse
            return f.fail(f.location_tok, try eb.printString("unknown file type: '{s}'", .{uri_path})),

        .http_request => |*http_request| ft: {
            const head = &http_request.response.head;

            // Content-Type takes first precedence.
            const content_type = head.content_type orelse
                return f.fail(f.location_tok, try eb.addString("missing 'Content-Type' header"));

            // Extract the MIME type, ignoring charset and boundary directives.
            const mime_type_end = std.mem.indexOf(u8, content_type, ";") orelse content_type.len;
            const mime_type = content_type[0..mime_type_end];

            if (ascii.eqlIgnoreCase(mime_type, "application/x-tar"))
                break :ft .tar;

            if (ascii.eqlIgnoreCase(mime_type, "application/gzip") or
                ascii.eqlIgnoreCase(mime_type, "application/x-gzip") or
                ascii.eqlIgnoreCase(mime_type, "application/tar+gzip") or
                ascii.eqlIgnoreCase(mime_type, "application/x-tar-gz") or
                ascii.eqlIgnoreCase(mime_type, "application/x-gtar-compressed"))
            {
                break :ft .@"tar.gz";
            }

            if (ascii.eqlIgnoreCase(mime_type, "application/x-xz"))
                break :ft .@"tar.xz";

            if (ascii.eqlIgnoreCase(mime_type, "application/zstd"))
                break :ft .@"tar.zst";

            if (ascii.eqlIgnoreCase(mime_type, "application/zip") or
                ascii.eqlIgnoreCase(mime_type, "application/x-zip-compressed") or
                ascii.eqlIgnoreCase(mime_type, "application/java-archive"))
            {
                break :ft .zip;
            }

            if (!ascii.eqlIgnoreCase(mime_type, "application/octet-stream") and
                !ascii.eqlIgnoreCase(mime_type, "application/x-compressed"))
            {
                return f.fail(f.location_tok, try eb.printString(
                    "unrecognized 'Content-Type' header: '{s}'",
                    .{content_type},
                ));
            }

            // Next, the filename from 'content-disposition: attachment' takes precedence.
            if (head.content_disposition) |cd_header| {
                break :ft FileType.fromContentDisposition(cd_header) orelse {
                    return f.fail(f.location_tok, try eb.printString(
                        "unsupported Content-Disposition header value: '{s}' for Content-Type=application/octet-stream",
                        .{cd_header},
                    ));
                };
            }

            // Finally, the path from the URI is used.
            break :ft FileType.fromPath(uri_path) orelse {
                return f.fail(f.location_tok, try eb.printString("unknown file type: '{s}'", .{uri_path}));
            };
        },

        .git => .git_pack,

        .dir => |dir| {
            f.recursiveDirectoryCopy(dir, tmp_directory.handle) catch |err| {
                return f.fail(f.location_tok, try eb.printString("unable to copy directory '{s}': {t}", .{
                    uri_path, err,
                }));
            };
            return .{};
        },
    };

    switch (file_type) {
        .tar => {
            return unpackTarball(f, tmp_directory.handle, resource.reader());
        },
        .@"tar.gz" => {
            var flate_buffer: [std.compress.flate.max_window_len]u8 = undefined;
            var decompress: std.compress.flate.Decompress = .init(resource.reader(), .gzip, &flate_buffer);
            return try unpackTarball(f, tmp_directory.handle, &decompress.reader);
        },
        .@"tar.xz" => {
            const gpa = f.arena.child_allocator;
            var decompress = std.compress.xz.Decompress.init(resource.reader(), gpa, &.{}) catch |err|
                return f.fail(f.location_tok, try eb.printString("unable to decompress tarball: {t}", .{err}));
            defer decompress.deinit();
            return try unpackTarball(f, tmp_directory.handle, &decompress.reader);
        },
        .@"tar.zst" => {
            const window_len = std.compress.zstd.default_window_len;
            const window_buffer = try f.arena.allocator().alloc(u8, window_len + std.compress.zstd.block_size_max);
            var decompress: std.compress.zstd.Decompress = .init(resource.reader(), window_buffer, .{
                .verify_checksum = false,
                .window_len = window_len,
            });
            return try unpackTarball(f, tmp_directory.handle, &decompress.reader);
        },
        .git_pack => return unpackGitPack(f, tmp_directory.handle, &resource.git) catch |err| switch (err) {
            error.FetchFailed => return error.FetchFailed,
            error.OutOfMemory => return error.OutOfMemory,
            else => |e| return f.fail(f.location_tok, try eb.printString("unable to unpack git files: {t}", .{e})),
        },
        .zip => return unzip(f, tmp_directory.handle, resource.reader()) catch |err| switch (err) {
            error.ReadFailed => return f.fail(f.location_tok, try eb.printString(
                "failed reading resource: {t}",
                .{err},
            )),
            else => |e| return e,
        },
    }
}

fn unpackTarball(f: *Fetch, out_dir: fs.Dir, reader: *Io.Reader) RunError!UnpackResult {
    const eb = &f.error_bundle;
    const arena = f.arena.allocator();

    var diagnostics: std.tar.Diagnostics = .{ .allocator = arena };

    std.tar.pipeToFileSystem(out_dir, reader, .{
        .diagnostics = &diagnostics,
        .strip_components = 0,
        .mode_mode = .ignore,
        .exclude_empty_directories = true,
    }) catch |err| return f.fail(
        f.location_tok,
        try eb.printString("unable to unpack tarball to temporary directory: {t}", .{err}),
    );

    var res: UnpackResult = .{ .root_dir = diagnostics.root_dir };
    if (diagnostics.errors.items.len > 0) {
        try res.allocErrors(arena, diagnostics.errors.items.len, "unable to unpack tarball");
        for (diagnostics.errors.items) |item| {
            switch (item) {
                .unable_to_create_file => |i| res.unableToCreateFile(stripRoot(i.file_name, res.root_dir), i.code),
                .unable_to_create_sym_link => |i| res.unableToCreateSymLink(stripRoot(i.file_name, res.root_dir), i.link_name, i.code),
                .unsupported_file_type => |i| res.unsupportedFileType(stripRoot(i.file_name, res.root_dir), @intFromEnum(i.file_type)),
                .components_outside_stripped_prefix => unreachable, // unreachable with strip_components = 0
            }
        }
    }
    return res;
}

fn unzip(
    f: *Fetch,
    out_dir: fs.Dir,
    reader: *Io.Reader,
) error{ ReadFailed, OutOfMemory, Canceled, FetchFailed }!UnpackResult {
    // We write the entire contents to a file first because zip files
    // must be processed back to front and they could be too large to
    // load into memory.

    const io = f.job_queue.io;
    const cache_root = f.job_queue.global_cache;
    const prefix = "tmp/";
    const suffix = ".zip";
    const eb = &f.error_bundle;
    const random_len = @sizeOf(u64) * 2;

    var zip_path: [prefix.len + random_len + suffix.len]u8 = undefined;
    zip_path[0..prefix.len].* = prefix.*;
    zip_path[prefix.len + random_len ..].* = suffix.*;

    var zip_file = while (true) {
        const random_integer = std.crypto.random.int(u64);
        zip_path[prefix.len..][0..random_len].* = std.fmt.hex(random_integer);

        break cache_root.handle.createFile(&zip_path, .{
            .exclusive = true,
            .read = true,
        }) catch |err| switch (err) {
            error.PathAlreadyExists => continue,
            error.Canceled => return error.Canceled,
            else => |e| return f.fail(
                f.location_tok,
                try eb.printString("failed to create temporary zip file: {t}", .{e}),
            ),
        };
    };
    defer zip_file.close();
    var zip_file_buffer: [4096]u8 = undefined;
    var zip_file_reader = b: {
        var zip_file_writer = zip_file.writer(&zip_file_buffer);

        _ = reader.streamRemaining(&zip_file_writer.interface) catch |err| switch (err) {
            error.ReadFailed => return error.ReadFailed,
            error.WriteFailed => return f.fail(
                f.location_tok,
                try eb.printString("failed writing temporary zip file: {t}", .{err}),
            ),
        };
        zip_file_writer.interface.flush() catch |err| return f.fail(
            f.location_tok,
            try eb.printString("failed writing temporary zip file: {t}", .{err}),
        );
        break :b zip_file_writer.moveToReader(io);
    };

    var diagnostics: std.zip.Diagnostics = .{ .allocator = f.arena.allocator() };
    // No need to deinit since we are using an arena allocator.

    zip_file_reader.seekTo(0) catch |err|
        return f.fail(f.location_tok, try eb.printString("failed to seek temporary zip file: {t}", .{err}));
    std.zip.extract(out_dir, &zip_file_reader, .{
        .allow_backslashes = true,
        .diagnostics = &diagnostics,
    }) catch |err| return f.fail(f.location_tok, try eb.printString("zip extract failed: {t}", .{err}));

    cache_root.handle.deleteFile(&zip_path) catch |err|
        return f.fail(f.location_tok, try eb.printString("delete temporary zip failed: {t}", .{err}));

    return .{ .root_dir = diagnostics.root_dir };
}

fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource.Git) anyerror!UnpackResult {
    const io = f.job_queue.io;
    const arena = f.arena.allocator();
    // TODO don't try to get a gpa from an arena. expose this dependency higher up
    // because the backing of arena could be page allocator
    const gpa = f.arena.child_allocator;
    const object_format: git.Oid.Format = resource.want_oid;

    var res: UnpackResult = .{};
    // The .git directory is used to store the packfile and associated index, but
    // we do not attempt to replicate the exact structure of a real .git
    // directory, since that isn't relevant for fetching a package.
    {
        var pack_dir = try out_dir.makeOpenPath(".git", .{});
        defer pack_dir.close();
        var pack_file = try pack_dir.createFile("pkg.pack", .{ .read = true });
        defer pack_file.close();
        var pack_file_buffer: [4096]u8 = undefined;
        var pack_file_reader = b: {
            var pack_file_writer = pack_file.writer(&pack_file_buffer);
            const fetch_reader = &resource.fetch_stream.reader;
            _ = try fetch_reader.streamRemaining(&pack_file_writer.interface);
            try pack_file_writer.interface.flush();
            break :b pack_file_writer.moveToReader(io);
        };

        var index_file = try pack_dir.createFile("pkg.idx", .{ .read = true });
        defer index_file.close();
        var index_file_buffer: [2000]u8 = undefined;
        var index_file_writer = index_file.writer(&index_file_buffer);
        {
            const index_prog_node = f.prog_node.start("Index pack", 0);
            defer index_prog_node.end();
            try git.indexPack(gpa, object_format, &pack_file_reader, &index_file_writer);
        }

        {
            var index_file_reader = index_file.reader(io, &index_file_buffer);
            const checkout_prog_node = f.prog_node.start("Checkout", 0);
            defer checkout_prog_node.end();
            var repository: git.Repository = undefined;
            try repository.init(gpa, object_format, &pack_file_reader, &index_file_reader);
            defer repository.deinit();
            var diagnostics: git.Diagnostics = .{ .allocator = arena };
            try repository.checkout(out_dir, resource.want_oid, &diagnostics);

            if (diagnostics.errors.items.len > 0) {
                try res.allocErrors(arena, diagnostics.errors.items.len, "unable to unpack packfile");
                for (diagnostics.errors.items) |item| {
                    switch (item) {
                        .unable_to_create_file => |i| res.unableToCreateFile(i.file_name, i.code),
                        .unable_to_create_sym_link => |i| res.unableToCreateSymLink(i.file_name, i.link_name, i.code),
                    }
                }
            }
        }
    }

    try out_dir.deleteTree(".git");
    return res;
}

fn recursiveDirectoryCopy(f: *Fetch, dir: fs.Dir, tmp_dir: fs.Dir) anyerror!void {
    const gpa = f.arena.child_allocator;
    // Recursive directory copy.
    var it = try dir.walk(gpa);
    defer it.deinit();
    while (try it.next()) |entry| {
        switch (entry.kind) {
            .directory => {}, // omit empty directories
            .file => {
                dir.copyFile(
                    entry.path,
                    tmp_dir,
                    entry.path,
                    .{},
                ) catch |err| switch (err) {
                    error.FileNotFound => {
                        if (fs.path.dirname(entry.path)) |dirname| try tmp_dir.makePath(dirname);
                        try dir.copyFile(entry.path, tmp_dir, entry.path, .{});
                    },
                    else => |e| return e,
                };
            },
            .sym_link => {
                var buf: [fs.max_path_bytes]u8 = undefined;
                const link_name = try dir.readLink(entry.path, &buf);
                // TODO: if this would create a symlink to outside
                // the destination directory, fail with an error instead.
                tmp_dir.symLink(link_name, entry.path, .{}) catch |err| switch (err) {
                    error.FileNotFound => {
                        if (fs.path.dirname(entry.path)) |dirname| try tmp_dir.makePath(dirname);
                        try tmp_dir.symLink(link_name, entry.path, .{});
                    },
                    else => |e| return e,
                };
            },
            else => return error.IllegalFileTypeInPackage,
        }
    }
}

pub fn renameTmpIntoCache(cache_dir: fs.Dir, tmp_dir_sub_path: []const u8, dest_dir_sub_path: []const u8) !void {
    assert(dest_dir_sub_path[1] == fs.path.sep);
    var handled_missing_dir = false;
    while (true) {
        cache_dir.rename(tmp_dir_sub_path, dest_dir_sub_path) catch |err| switch (err) {
            error.FileNotFound => {
                if (handled_missing_dir) return err;
                cache_dir.makeDir(dest_dir_sub_path[0..1]) catch |mkd_err| switch (mkd_err) {
                    error.PathAlreadyExists => handled_missing_dir = true,
                    else => |e| return e,
                };
                continue;
            },
            error.PathAlreadyExists, error.AccessDenied => {
                // Package has already been downloaded and may already be in use on the system.
                cache_dir.deleteTree(tmp_dir_sub_path) catch {
                    // Garbage files leftover in zig-cache/tmp/ are, as they say
                    // on Star Trek, "operating within normal parameters".
                };
            },
            else => |e| return e,
        };
        break;
    }
}

const ComputedHash = struct {
    digest: Package.Hash.Digest,
    total_size: u64,
};

/// Assumes that files not included in the package have already been filtered
/// prior to calling this function. This ensures that files not protected by
/// the hash are not present on the file system. Empty directories are *not
/// hashed* and must not be present on the file system when calling this
/// function.
fn computeHash(f: *Fetch, pkg_path: Cache.Path, filter: Filter) RunError!ComputedHash {
    const io = f.job_queue.io;
    // All the path name strings need to be in memory for sorting.
    const arena = f.arena.allocator();
    const gpa = f.arena.child_allocator;
    const eb = &f.error_bundle;
    const root_dir = pkg_path.root_dir.handle;

    // Collect all files, recursively, then sort.
    var all_files = std.array_list.Managed(*HashedFile).init(gpa);
    defer all_files.deinit();

    var deleted_files = std.array_list.Managed(*DeletedFile).init(gpa);
    defer deleted_files.deinit();

    // Track directories which had any files deleted from them so that empty directories
    // can be deleted.
    var sus_dirs: std.StringArrayHashMapUnmanaged(void) = .empty;
    defer sus_dirs.deinit(gpa);

    var walker = try root_dir.walk(gpa);
    defer walker.deinit();

    // Total number of bytes of file contents included in the package.
    var total_size: u64 = 0;

    {
        // The final hash will be a hash of each file hashed independently. This
        // allows hashing in parallel.
        var group: Io.Group = .init;
        defer group.wait(io);

        while (walker.next() catch |err| {
            try eb.addRootErrorMessage(.{ .msg = try eb.printString(
                "unable to walk temporary directory '{f}': {s}",
                .{ pkg_path, @errorName(err) },
            ) });
            return error.FetchFailed;
        }) |entry| {
            if (entry.kind == .directory) continue;

            const entry_pkg_path = stripRoot(entry.path, pkg_path.sub_path);
            if (!filter.includePath(entry_pkg_path)) {
                // Delete instead of including in hash calculation.
                const fs_path = try arena.dupe(u8, entry.path);

                // Also track the parent directory in case it becomes empty.
                if (fs.path.dirname(fs_path)) |parent|
                    try sus_dirs.put(gpa, parent, {});

                const deleted_file = try arena.create(DeletedFile);
                deleted_file.* = .{
                    .fs_path = fs_path,
                    .failure = undefined, // to be populated by the worker
                };
                group.async(io, workerDeleteFile, .{ root_dir, deleted_file });
                try deleted_files.append(deleted_file);
                continue;
            }

            const kind: HashedFile.Kind = switch (entry.kind) {
                .directory => unreachable,
                .file => .file,
                .sym_link => .link,
                else => return f.fail(f.location_tok, try eb.printString(
                    "package contains '{s}' which has illegal file type '{s}'",
                    .{ entry.path, @tagName(entry.kind) },
                )),
            };

            if (std.mem.eql(u8, entry_pkg_path, Package.build_zig_basename))
                f.has_build_zig = true;

            const fs_path = try arena.dupe(u8, entry.path);
            const hashed_file = try arena.create(HashedFile);
            hashed_file.* = .{
                .fs_path = fs_path,
                .normalized_path = try normalizePathAlloc(arena, entry_pkg_path),
                .kind = kind,
                .hash = undefined, // to be populated by the worker
                .failure = undefined, // to be populated by the worker
                .size = undefined, // to be populated by the worker
            };
            group.async(io, workerHashFile, .{ root_dir, hashed_file });
            try all_files.append(hashed_file);
        }
    }

    {
        // Sort by length, descending, so that child directories get removed first.
        sus_dirs.sortUnstable(@as(struct {
            keys: []const []const u8,
            pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
                return ctx.keys[b_index].len < ctx.keys[a_index].len;
            }
        }, .{ .keys = sus_dirs.keys() }));

        // During this loop, more entries will be added, so we must loop by index.
        var i: usize = 0;
        while (i < sus_dirs.count()) : (i += 1) {
            const sus_dir = sus_dirs.keys()[i];
            root_dir.deleteDir(sus_dir) catch |err| switch (err) {
                error.DirNotEmpty => continue,
                error.FileNotFound => continue,
                else => |e| {
                    try eb.addRootErrorMessage(.{ .msg = try eb.printString(
                        "unable to delete empty directory '{s}': {s}",
                        .{ sus_dir, @errorName(e) },
1598 ) });
1599 return error.FetchFailed;
1600 },
1601 };
1602 if (fs.path.dirname(sus_dir)) |parent| {
1603 try sus_dirs.put(gpa, parent, {});
1604 }
1605 }
1606 }
1607
1608 std.mem.sortUnstable(*HashedFile, all_files.items, {}, HashedFile.lessThan);
1609
1610 var hasher = Package.Hash.Algo.init(.{});
1611 var any_failures = false;
1612 for (all_files.items) |hashed_file| {
1613 hashed_file.failure catch |err| {
1614 any_failures = true;
1615 try eb.addRootErrorMessage(.{
1616 .msg = try eb.printString("unable to hash '{s}': {s}", .{
1617 hashed_file.fs_path, @errorName(err),
1618 }),
1619 });
1620 };
1621 hasher.update(&hashed_file.hash);
1622 total_size += hashed_file.size;
1623 }
1624 for (deleted_files.items) |deleted_file| {
1625 deleted_file.failure catch |err| {
1626 any_failures = true;
1627 try eb.addRootErrorMessage(.{
1628 .msg = try eb.printString("failed to delete excluded path '{s}' from package: {s}", .{
1629 deleted_file.fs_path, @errorName(err),
1630 }),
1631 });
1632 };
1633 }
1634
1635 if (any_failures) return error.FetchFailed;
1636
1637 if (f.job_queue.debug_hash) {
1638 assert(!f.job_queue.recursive);
1639 // Print something to stdout that can be text diffed to figure out why
1640 // the package hash is different.
1641 dumpHashInfo(all_files.items) catch |err| {
1642 std.debug.print("unable to write to stdout: {s}\n", .{@errorName(err)});
1643 std.process.exit(1);
1644 };
1645 }
1646
1647 return .{
1648 .digest = hasher.finalResult(),
1649 .total_size = total_size,
1650 };
1651}
1652
1653fn dumpHashInfo(all_files: []const *const HashedFile) !void {
1654 var stdout_buffer: [1024]u8 = undefined;
1655 var stdout_writer: fs.File.Writer = .initStreaming(.stdout(), &stdout_buffer);
1656 const w = &stdout_writer.interface;
1657 for (all_files) |hashed_file| {
1658 try w.print("{t}: {x}: {s}\n", .{ hashed_file.kind, &hashed_file.hash, hashed_file.normalized_path });
1659 }
1660 try w.flush();
1661}
1662
1663fn workerHashFile(dir: fs.Dir, hashed_file: *HashedFile) void {
1664 hashed_file.failure = hashFileFallible(dir, hashed_file);
1665}
1666
1667fn workerDeleteFile(dir: fs.Dir, deleted_file: *DeletedFile) void {
1668 deleted_file.failure = deleteFileFallible(dir, deleted_file);
1669}
1670
1671fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void {
1672 var buf: [8000]u8 = undefined;
1673 var hasher = Package.Hash.Algo.init(.{});
1674 hasher.update(hashed_file.normalized_path);
1675 var file_size: u64 = 0;
1676
1677 switch (hashed_file.kind) {
1678 .file => {
1679 var file = try dir.openFile(hashed_file.fs_path, .{});
1680 defer file.close();
1681 // Hard-coded false executable bit: https://github.com/ziglang/zig/issues/17463
1682 hasher.update(&.{ 0, 0 });
1683 var file_header: FileHeader = .{};
1684 while (true) {
1685 const bytes_read = try file.read(&buf);
1686 if (bytes_read == 0) break;
1687 file_size += bytes_read;
1688 hasher.update(buf[0..bytes_read]);
1689 file_header.update(buf[0..bytes_read]);
1690 }
1691 if (file_header.isExecutable()) {
1692 try setExecutable(file);
1693 }
1694 },
1695 .link => {
1696 const link_name = try dir.readLink(hashed_file.fs_path, &buf);
1697 if (fs.path.sep != canonical_sep) {
1698 // Package hashes are intended to be consistent across
1699 // platforms which means we must normalize path separators
1700 // inside symlinks.
1701 normalizePath(link_name);
1702 }
1703 hasher.update(link_name);
1704 },
1705 }
1706 hasher.final(&hashed_file.hash);
1707 hashed_file.size = file_size;
1708}
1709
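// Small sketch of hashFileFallible on a regular file: the worker records the
// file's byte size, and non-executable content (no shebang or magic bytes)
// leaves the executable bit untouched. The hash digest itself is not asserted
// here since it depends on the hash algorithm configuration.
test hashFileFallible {
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    try tmp.dir.writeFile(.{ .sub_path = "file", .data = "abc" });

    var hashed_file: HashedFile = .{
        .fs_path = "file",
        .normalized_path = "file",
        .kind = .file,
        .hash = undefined,
        .failure = undefined,
        .size = undefined,
    };
    try hashFileFallible(tmp.dir, &hashed_file);
    try std.testing.expectEqual(3, hashed_file.size);
}
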
fn deleteFileFallible(dir: fs.Dir, deleted_file: *DeletedFile) DeletedFile.Error!void {
    try dir.deleteFile(deleted_file.fs_path);
}

fn setExecutable(file: fs.File) !void {
    if (!std.fs.has_executable_bit) return;

    const S = std.posix.S;
    const mode = fs.File.default_mode | S.IXUSR | S.IXGRP | S.IXOTH;
    try file.chmod(mode);
}

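// Minimal sketch of setExecutable: on file systems with an executable bit, it
// adds execute permission on top of the default file mode; elsewhere it is a
// no-op, so the test is skipped.
test setExecutable {
    if (!std.fs.has_executable_bit) return error.SkipZigTest;
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();

    const file = try tmp.dir.createFile("exe", .{});
    defer file.close();
    try setExecutable(file);
    try std.testing.expect((try file.stat()).mode & std.posix.S.IXUSR != 0);
}
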
const DeletedFile = struct {
    fs_path: []const u8,
    failure: Error!void,

    const Error =
        fs.Dir.DeleteFileError ||
        fs.Dir.DeleteDirError;
};

const HashedFile = struct {
    fs_path: []const u8,
    normalized_path: []const u8,
    hash: Package.Hash.Digest,
    failure: Error!void,
    kind: Kind,
    size: u64,

    const Error =
        fs.File.OpenError ||
        fs.File.ReadError ||
        fs.File.StatError ||
        fs.File.ChmodError ||
        fs.Dir.ReadLinkError;

    const Kind = enum { file, link };

    fn lessThan(context: void, lhs: *const HashedFile, rhs: *const HashedFile) bool {
        _ = context;
        return std.mem.lessThan(u8, lhs.normalized_path, rhs.normalized_path);
    }
};

/// Strips root directory name from file system path.
fn stripRoot(fs_path: []const u8, root_dir: []const u8) []const u8 {
    if (root_dir.len == 0 or fs_path.len <= root_dir.len) return fs_path;

    if (std.mem.eql(u8, fs_path[0..root_dir.len], root_dir) and fs.path.isSep(fs_path[root_dir.len])) {
        return fs_path[root_dir.len + 1 ..];
    }

    return fs_path;
}

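// Behavior sketch for stripRoot: the root prefix is removed only when it is
// followed by a path separator; an empty root, or a mere string-prefix match
// that is not a whole path component, leaves the path unchanged.
test stripRoot {
    try std.testing.expectEqualStrings("b/c", stripRoot("a/b/c", "a"));
    try std.testing.expectEqualStrings("a/b/c", stripRoot("a/b/c", ""));
    try std.testing.expectEqualStrings("ab/c", stripRoot("ab/c", "a"));
}
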
/// Makes a file system path identical across operating systems by converting
/// backslashes into forward slashes.
fn normalizePathAlloc(arena: Allocator, pkg_path: []const u8) ![]const u8 {
    const normalized = try arena.dupe(u8, pkg_path);
    if (fs.path.sep == canonical_sep) return normalized;
    normalizePath(normalized);
    return normalized;
}

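// Cross-platform sketch for normalizePathAlloc: on POSIX this is a plain
// copy, while on Windows the native backslash separators are rewritten to the
// canonical forward slash. Building the input from fs.path.sep_str keeps the
// test valid on both.
test normalizePathAlloc {
    var arena_instance = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer arena_instance.deinit();
    const input = "dir" ++ fs.path.sep_str ++ "file";
    const normalized = try normalizePathAlloc(arena_instance.allocator(), input);
    try std.testing.expectEqualStrings("dir/file", normalized);
}
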
const canonical_sep = fs.path.sep_posix;

fn normalizePath(bytes: []u8) void {
    assert(fs.path.sep != canonical_sep);
    std.mem.replaceScalar(u8, bytes, fs.path.sep, canonical_sep);
}

const Filter = struct {
    include_paths: std.StringArrayHashMapUnmanaged(void) = .empty,

    /// sub_path is relative to the package root.
    pub fn includePath(self: Filter, sub_path: []const u8) bool {
        if (self.include_paths.count() == 0) return true;
        if (self.include_paths.contains("")) return true;
        if (self.include_paths.contains(".")) return true;
        if (self.include_paths.contains(sub_path)) return true;

        // Check if any included paths are parent directories of sub_path.
        var dirname = sub_path;
        while (std.fs.path.dirname(dirname)) |next_dirname| {
            if (self.include_paths.contains(next_dirname)) return true;
            dirname = next_dirname;
        }

        return false;
    }

    test includePath {
        const gpa = std.testing.allocator;
        var filter: Filter = .{};
        defer filter.include_paths.deinit(gpa);

        try filter.include_paths.put(gpa, "src", {});
        try std.testing.expect(filter.includePath("src/core/unix/SDL_poll.c"));
        try std.testing.expect(!filter.includePath(".gitignore"));
    }
};

pub fn depDigest(pkg_root: Cache.Path, cache_root: Cache.Directory, dep: Manifest.Dependency) ?Package.Hash {
    if (dep.hash) |h| return .fromSlice(h);

    switch (dep.location) {
        .url => return null,
        .path => |rel_path| {
            var buf: [fs.max_path_bytes]u8 = undefined;
            var fba = std.heap.FixedBufferAllocator.init(&buf);
            const new_root = pkg_root.resolvePosix(fba.allocator(), rel_path) catch
                return null;
            return relativePathDigest(new_root, cache_root);
        },
    }
}

// Detects an executable header: ELF or Mach-O magic bytes, or a shebang line.
const FileHeader = struct {
    header: [4]u8 = undefined,
    bytes_read: usize = 0,

    pub fn update(self: *FileHeader, buf: []const u8) void {
        if (self.bytes_read >= self.header.len) return;
        const n = @min(self.header.len - self.bytes_read, buf.len);
        @memcpy(self.header[self.bytes_read..][0..n], buf[0..n]);
        self.bytes_read += n;
    }

    fn isScript(self: *FileHeader) bool {
        const shebang = "#!";
        return std.mem.eql(u8, self.header[0..@min(self.bytes_read, shebang.len)], shebang);
    }

    fn isElf(self: *FileHeader) bool {
        const elf_magic = std.elf.MAGIC;
        return std.mem.eql(u8, self.header[0..@min(self.bytes_read, elf_magic.len)], elf_magic);
    }

    fn isMachO(self: *FileHeader) bool {
        if (self.bytes_read < 4) return false;
        const magic_number = std.mem.readInt(u32, &self.header, builtin.cpu.arch.endian());
        return magic_number == std.macho.MH_MAGIC or
            magic_number == std.macho.MH_MAGIC_64 or
            magic_number == std.macho.FAT_MAGIC or
            magic_number == std.macho.FAT_MAGIC_64 or
            magic_number == std.macho.MH_CIGAM or
            magic_number == std.macho.MH_CIGAM_64 or
            magic_number == std.macho.FAT_CIGAM or
            magic_number == std.macho.FAT_CIGAM_64;
    }

    pub fn isExecutable(self: *FileHeader) bool {
        return self.isScript() or self.isElf() or self.isMachO();
    }
};

test FileHeader {
    var h: FileHeader = .{};
    try std.testing.expect(!h.isExecutable());

    const elf_magic = std.elf.MAGIC;
    h.update(elf_magic[0..2]);
    try std.testing.expect(!h.isExecutable());
    h.update(elf_magic[2..4]);
    try std.testing.expect(h.isExecutable());

    h.update(elf_magic[2..4]);
    try std.testing.expect(h.isExecutable());

    const macho64_magic_bytes = [_]u8{ 0xCF, 0xFA, 0xED, 0xFE };
    h.bytes_read = 0;
    h.update(&macho64_magic_bytes);
    try std.testing.expect(h.isExecutable());

    const macho64_cigam_bytes = [_]u8{ 0xFE, 0xED, 0xFA, 0xCF };
    h.bytes_read = 0;
    h.update(&macho64_cigam_bytes);
    try std.testing.expect(h.isExecutable());
}

// Result of the `unpackResource` operation. Enables collecting errors from
// tar/git diagnostics, filtering those errors by manifest inclusion rules, and
// emitting the remaining errors to an `ErrorBundle`.
const UnpackResult = struct {
    errors: []Error = undefined,
    errors_count: usize = 0,
    root_error_message: []const u8 = "",

    // A non-empty value means that the package contents are inside a
    // sub-directory indicated by the named path.
    root_dir: []const u8 = "",

    const Error = union(enum) {
        unable_to_create_sym_link: struct {
            code: anyerror,
            file_name: []const u8,
            link_name: []const u8,
        },
        unable_to_create_file: struct {
            code: anyerror,
            file_name: []const u8,
        },
        unsupported_file_type: struct {
            file_name: []const u8,
            file_type: u8,
        },

        fn excluded(self: Error, filter: Filter) bool {
            const file_name = switch (self) {
                .unable_to_create_file => |info| info.file_name,
                .unable_to_create_sym_link => |info| info.file_name,
                .unsupported_file_type => |info| info.file_name,
            };
            return !filter.includePath(file_name);
        }
    };

    fn allocErrors(self: *UnpackResult, arena: std.mem.Allocator, n: usize, root_error_message: []const u8) !void {
        self.root_error_message = try arena.dupe(u8, root_error_message);
        self.errors = try arena.alloc(UnpackResult.Error, n);
    }

    fn hasErrors(self: *UnpackResult) bool {
        return self.errors_count > 0;
    }

    fn unableToCreateFile(self: *UnpackResult, file_name: []const u8, err: anyerror) void {
        self.errors[self.errors_count] = .{ .unable_to_create_file = .{
            .code = err,
            .file_name = file_name,
        } };
        self.errors_count += 1;
    }

    fn unableToCreateSymLink(self: *UnpackResult, file_name: []const u8, link_name: []const u8, err: anyerror) void {
        self.errors[self.errors_count] = .{ .unable_to_create_sym_link = .{
            .code = err,
            .file_name = file_name,
            .link_name = link_name,
        } };
        self.errors_count += 1;
    }

    fn unsupportedFileType(self: *UnpackResult, file_name: []const u8, file_type: u8) void {
        self.errors[self.errors_count] = .{ .unsupported_file_type = .{
            .file_name = file_name,
            .file_type = file_type,
        } };
        self.errors_count += 1;
    }

    fn validate(self: *UnpackResult, f: *Fetch, filter: Filter) !void {
        if (self.errors_count == 0) return;

        var unfiltered_errors: u32 = 0;
        for (self.errors) |item| {
            if (item.excluded(filter)) continue;
            unfiltered_errors += 1;
        }
        if (unfiltered_errors == 0) return;

        // Emit errors to an `ErrorBundle`.
        const eb = &f.error_bundle;
        try eb.addRootErrorMessage(.{
            .msg = try eb.addString(self.root_error_message),
            .src_loc = try f.srcLoc(f.location_tok),
            .notes_len = unfiltered_errors,
        });
        var note_i: u32 = try eb.reserveNotes(unfiltered_errors);
        for (self.errors) |item| {
            if (item.excluded(filter)) continue;
            switch (item) {
                .unable_to_create_sym_link => |info| {
                    eb.extra.items[note_i] = @intFromEnum(try eb.addErrorMessage(.{
                        .msg = try eb.printString("unable to create symlink from '{s}' to '{s}': {s}", .{
                            info.file_name, info.link_name, @errorName(info.code),
                        }),
                    }));
                },
                .unable_to_create_file => |info| {
                    eb.extra.items[note_i] = @intFromEnum(try eb.addErrorMessage(.{
                        .msg = try eb.printString("unable to create file '{s}': {s}", .{
                            info.file_name, @errorName(info.code),
                        }),
                    }));
                },
                .unsupported_file_type => |info| {
                    eb.extra.items[note_i] = @intFromEnum(try eb.addErrorMessage(.{
                        .msg = try eb.printString("file '{s}' has unsupported type '{c}'", .{
                            info.file_name, info.file_type,
                        }),
                    }));
                },
            }
            note_i += 1;
        }

        return error.FetchFailed;
    }

    test validate {
        const gpa = std.testing.allocator;
        var arena_instance = std.heap.ArenaAllocator.init(gpa);
        defer arena_instance.deinit();
        const arena = arena_instance.allocator();

        // fill UnpackResult with errors
        var res: UnpackResult = .{};
        try res.allocErrors(arena, 4, "unable to unpack");
        try std.testing.expectEqual(0, res.errors_count);
        res.unableToCreateFile("dir1/file1", error.File1);
        res.unableToCreateSymLink("dir2/file2", "filename", error.SymlinkError);
        res.unableToCreateFile("dir1/file3", error.File3);
        res.unsupportedFileType("dir2/file4", 'x');
        try std.testing.expectEqual(4, res.errors_count);

        // create filter, which includes dir2 and excludes dir1
        var filter: Filter = .{};
        try filter.include_paths.put(arena, "dir2", {});

        // init Fetch
        var fetch: Fetch = undefined;
        fetch.parent_manifest_ast = null;
        fetch.location_tok = 0;
        try fetch.error_bundle.init(gpa);
        defer fetch.error_bundle.deinit();

        // validate errors with filter
        try std.testing.expectError(error.FetchFailed, res.validate(&fetch, filter));

        // render errors to a string
        var errors = try fetch.error_bundle.toOwnedBundle("");
        defer errors.deinit(gpa);
        var aw: Io.Writer.Allocating = .init(gpa);
        defer aw.deinit();
        try errors.renderToWriter(.{}, &aw.writer, .no_color);
        try std.testing.expectEqualStrings(
            \\error: unable to unpack
            \\ note: unable to create symlink from 'dir2/file2' to 'filename': SymlinkError
            \\ note: file 'dir2/file4' has unsupported type 'x'
            \\
        , aw.written());
    }
};

test "tarball with duplicate paths" {
    // This tarball has the duplicate path 'dir1/file1' to simulate a
    // case-sensitive file system on any file system.
    //
    // duplicate_paths/
    // duplicate_paths/dir1/
    // duplicate_paths/dir1/file1
    // duplicate_paths/dir1/file1
    // duplicate_paths/build.zig.zon
    // duplicate_paths/src/
    // duplicate_paths/src/main.zig
    // duplicate_paths/src/root.zig
    // duplicate_paths/build.zig
    //

    const gpa = std.testing.allocator;
    const io = std.testing.io;
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();

    const tarball_name = "duplicate_paths.tar.gz";
    try saveEmbedFile(tarball_name, tmp.dir);
    const tarball_path = try std.fmt.allocPrint(gpa, ".zig-cache/tmp/{s}/{s}", .{ tmp.sub_path, tarball_name });
    defer gpa.free(tarball_path);

    // Run tarball fetch, expect it to fail
    var fb: TestFetchBuilder = undefined;
    var fetch = try fb.build(gpa, io, tmp.dir, tarball_path);
    defer fb.deinit();
    try std.testing.expectError(error.FetchFailed, fetch.run());

    try fb.expectFetchErrors(1,
        \\error: unable to unpack tarball
        \\ note: unable to create file 'dir1/file1': PathAlreadyExists
        \\
    );
}

test "tarball with excluded duplicate paths" {
    // Same as the previous tarball but has a build.zig.zon which excludes 'dir1'.
    //
    // .paths = .{
    //     "build.zig",
    //     "build.zig.zon",
    //     "src",
    // }
    //

    const gpa = std.testing.allocator;
    const io = std.testing.io;
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();

    const tarball_name = "duplicate_paths_excluded.tar.gz";
    try saveEmbedFile(tarball_name, tmp.dir);
    const tarball_path = try std.fmt.allocPrint(gpa, ".zig-cache/tmp/{s}/{s}", .{ tmp.sub_path, tarball_name });
    defer gpa.free(tarball_path);

    // Run tarball fetch, should succeed
    var fb: TestFetchBuilder = undefined;
    var fetch = try fb.build(gpa, io, tmp.dir, tarball_path);
    defer fb.deinit();
    try fetch.run();

    const hex_digest = Package.multiHashHexDigest(fetch.computed_hash.digest);
    try std.testing.expectEqualStrings(
        "12200bafe035cbb453dd717741b66e9f9d1e6c674069d06121dafa1b2e62eb6b22da",
        &hex_digest,
    );

    const expected_files: []const []const u8 = &.{
        "build.zig",
        "build.zig.zon",
        "src/main.zig",
        "src/root.zig",
    };
    try fb.expectPackageFiles(expected_files);
}

test "tarball without root folder" {
    // Tarball without a root folder. Manifest excludes dir1 and dir2.
    //
    // build.zig
    // build.zig.zon
    // dir1/
    // dir1/file2
    // dir1/file1
    // dir2/
    // dir2/file2
    // src/
    // src/main.zig
    //

    const gpa = std.testing.allocator;
    const io = std.testing.io;

    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();

    const tarball_name = "no_root.tar.gz";
    try saveEmbedFile(tarball_name, tmp.dir);
    const tarball_path = try std.fmt.allocPrint(gpa, ".zig-cache/tmp/{s}/{s}", .{ tmp.sub_path, tarball_name });
    defer gpa.free(tarball_path);

    // Run tarball fetch, should succeed
    var fb: TestFetchBuilder = undefined;
    var fetch = try fb.build(gpa, io, tmp.dir, tarball_path);
    defer fb.deinit();
    try fetch.run();

    const hex_digest = Package.multiHashHexDigest(fetch.computed_hash.digest);
    try std.testing.expectEqualStrings(
        "12209f939bfdcb8b501a61bb4a43124dfa1b2848adc60eec1e4624c560357562b793",
        &hex_digest,
    );

    const expected_files: []const []const u8 = &.{
        "build.zig",
        "build.zig.zon",
        "src/main.zig",
    };
    try fb.expectPackageFiles(expected_files);
}

test "set executable bit based on file content" {
    if (!std.fs.has_executable_bit) return error.SkipZigTest;
    const gpa = std.testing.allocator;
    const io = std.testing.io;

    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();

    const tarball_name = "executables.tar.gz";
    try saveEmbedFile(tarball_name, tmp.dir);
    const tarball_path = try std.fmt.allocPrint(gpa, ".zig-cache/tmp/{s}/{s}", .{ tmp.sub_path, tarball_name });
    defer gpa.free(tarball_path);

    // $ tar -tvf executables.tar.gz
    // drwxrwxr-x 0 executables/
    // -rwxrwxr-x 170 executables/hello
    // lrwxrwxrwx 0 executables/hello_ln -> hello
    // -rw-rw-r-- 0 executables/file1
    // -rw-rw-r-- 17 executables/script_with_shebang_without_exec_bit
    // -rwxrwxr-x 7 executables/script_without_shebang
    // -rwxrwxr-x 17 executables/script

    var fb: TestFetchBuilder = undefined;
    var fetch = try fb.build(gpa, io, tmp.dir, tarball_path);
    defer fb.deinit();

    try fetch.run();
    try std.testing.expectEqualStrings(
        "1220fecb4c06a9da8673c87fe8810e15785f1699212f01728eadce094d21effeeef3",
        &Package.multiHashHexDigest(fetch.computed_hash.digest),
    );

    var out = try fb.packageDir();
    defer out.close();
    const S = std.posix.S;
    // expect executable bit not set
    try std.testing.expect((try out.statFile("file1")).mode & S.IXUSR == 0);
    try std.testing.expect((try out.statFile("script_without_shebang")).mode & S.IXUSR == 0);
    // expect executable bit set
    try std.testing.expect((try out.statFile("hello")).mode & S.IXUSR != 0);
    try std.testing.expect((try out.statFile("script")).mode & S.IXUSR != 0);
    try std.testing.expect((try out.statFile("script_with_shebang_without_exec_bit")).mode & S.IXUSR != 0);
    try std.testing.expect((try out.statFile("hello_ln")).mode & S.IXUSR != 0);

    //
    // $ ls -al zig-cache/tmp/OCz9ovUcstDjTC_U/zig-global-cache/p/1220fecb4c06a9da8673c87fe8810e15785f1699212f01728eadce094d21effeeef3
    // -rw-rw-r-- 1 0 Apr file1
    // -rwxrwxr-x 1 170 Apr hello
    // lrwxrwxrwx 1 5 Apr hello_ln -> hello
    // -rwxrwxr-x 1 17 Apr script
    // -rw-rw-r-- 1 7 Apr script_without_shebang
    // -rwxrwxr-x 1 17 Apr script_with_shebang_without_exec_bit
}

fn saveEmbedFile(comptime tarball_name: []const u8, dir: fs.Dir) !void {
    const tarball_content = @embedFile("Fetch/testdata/" ++ tarball_name);
    var tmp_file = try dir.createFile(tarball_name, .{});
    defer tmp_file.close();
    try tmp_file.writeAll(tarball_content);
}

// Builds a Fetch with its required dependencies; cleans them up on deinit().
const TestFetchBuilder = struct {
    http_client: std.http.Client,
    global_cache_directory: Cache.Directory,
    job_queue: Fetch.JobQueue,
    fetch: Fetch,

    fn build(
        self: *TestFetchBuilder,
        allocator: std.mem.Allocator,
        io: Io,
        cache_parent_dir: std.fs.Dir,
        path_or_url: []const u8,
    ) !*Fetch {
        const cache_dir = try cache_parent_dir.makeOpenPath("zig-global-cache", .{});

        self.http_client = .{ .allocator = allocator, .io = io };
        self.global_cache_directory = .{ .handle = cache_dir, .path = null };

        self.job_queue = .{
            .io = io,
            .http_client = &self.http_client,
            .global_cache = self.global_cache_directory,
            .recursive = false,
            .read_only = false,
            .debug_hash = false,
            .work_around_btrfs_bug = false,
            .mode = .needed,
        };

        self.fetch = .{
            .arena = std.heap.ArenaAllocator.init(allocator),
            .location = .{ .path_or_url = path_or_url },
            .location_tok = 0,
            .hash_tok = .none,
            .name_tok = 0,
            .lazy_status = .eager,
            .parent_package_root = Cache.Path{ .root_dir = Cache.Directory{ .handle = cache_dir, .path = null } },
            .parent_manifest_ast = null,
            .prog_node = std.Progress.Node.none,
            .job_queue = &self.job_queue,
            .omit_missing_hash_error = true,
            .allow_missing_paths_field = false,
            .allow_missing_fingerprint = true, // so we can keep using the old testdata .tar.gz
            .allow_name_string = true, // so we can keep using the old testdata .tar.gz
            .use_latest_commit = true,

            .package_root = undefined,
            .error_bundle = undefined,
            .manifest = null,
            .manifest_ast = undefined,
            .computed_hash = undefined,
            .has_build_zig = false,
            .oom_flag = false,
            .latest_commit = null,

            .module = null,
        };
        return &self.fetch;
    }

    fn deinit(self: *TestFetchBuilder) void {
        self.fetch.deinit();
        self.job_queue.deinit();
        self.fetch.prog_node.end();
        self.global_cache_directory.handle.close();
        self.http_client.deinit();
    }

    fn packageDir(self: *TestFetchBuilder) !fs.Dir {
        const root = self.fetch.package_root;
        return try root.root_dir.handle.openDir(root.sub_path, .{ .iterate = true });
    }

    // Test helper; asserts that the package dir contains expected_files.
    // expected_files must be sorted.
    fn expectPackageFiles(self: *TestFetchBuilder, expected_files: []const []const u8) !void {
        var package_dir = try self.packageDir();
        defer package_dir.close();

        var actual_files: std.ArrayList([]u8) = .empty;
        defer actual_files.deinit(std.testing.allocator);
        defer for (actual_files.items) |file| std.testing.allocator.free(file);
        var walker = try package_dir.walk(std.testing.allocator);
        defer walker.deinit();
        while (try walker.next()) |entry| {
            if (entry.kind != .file) continue;
            const path = try std.testing.allocator.dupe(u8, entry.path);
            errdefer std.testing.allocator.free(path);
            std.mem.replaceScalar(u8, path, std.fs.path.sep, '/');
            try actual_files.append(std.testing.allocator, path);
        }
        std.mem.sortUnstable([]u8, actual_files.items, {}, struct {
            fn lessThan(_: void, a: []u8, b: []u8) bool {
                return std.mem.lessThan(u8, a, b);
            }
        }.lessThan);

        try std.testing.expectEqual(expected_files.len, actual_files.items.len);
        for (expected_files, 0..) |file_name, i| {
            try std.testing.expectEqualStrings(file_name, actual_files.items[i]);
        }
        try std.testing.expectEqualDeep(expected_files, actual_files.items);
    }

    // Test helper; asserts that the fetch failed with the `msg` error message.
    fn expectFetchErrors(self: *TestFetchBuilder, notes_len: usize, msg: []const u8) !void {
        var errors = try self.fetch.error_bundle.toOwnedBundle("");
        defer errors.deinit(std.testing.allocator);

        const em = errors.getErrorMessage(errors.getMessages()[0]);
        try std.testing.expectEqual(1, em.count);
        if (notes_len > 0) {
            try std.testing.expectEqual(notes_len, em.notes_len);
        }
        var aw: Io.Writer.Allocating = .init(std.testing.allocator);
        defer aw.deinit();
        try errors.renderToWriter(.{}, &aw.writer, .no_color);
        try std.testing.expectEqualStrings(msg, aw.written());
    }
};

test {
    _ = Filter;
    _ = FileType;
    _ = UnpackResult;
}