Commit 9fffffb07b

Jacob G-W <jacoblevgw@gmail.com>
2021-06-20 03:10:22
fix code broken from previous commit
1 parent b83b388
Changed files (162)
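Nearly every hunk below follows the same pattern: the previous commit turned unused function parameters, loop captures, and error captures into compile errors, so this one either discards the unused name with `_ = x;` (when the parameter must stay to satisfy a required signature) or drops the capture entirely (`|entry, i|` becomes `|entry|`, `catch |err|` becomes `catch`, and in one case a parameter becomes `_: SymLinkFlags`). A minimal standalone sketch of that pattern, not taken from the commit itself:

```zig
const std = @import("std");

// The comparator must keep its `context: void` parameter to match the
// caller's expected signature, so the unused parameter is discarded.
fn lessThan(context: void, a: u32, b: u32) bool {
    _ = context; // silences the unused-parameter error
    return a < b;
}

fn sum(items: []const u32) u32 {
    var total: u32 = 0;
    // Unused loop captures are simply dropped: `|item, i|` becomes `|item|`.
    for (items) |item| {
        total += item;
    }
    return total;
}

fn parseOrZero(s: []const u8) u32 {
    // Unused error captures are dropped the same way: `catch |err|` becomes `catch`.
    return std.fmt.parseInt(u32, s, 10) catch 0;
}

test "discard pattern" {
    const items = [_]u32{ 1, 2, 3 };
    try std.testing.expect(sum(&items) == 6);
    try std.testing.expect(lessThan({}, 1, 2));
    try std.testing.expect(parseOrZero("not a number") == 0);
}
```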
lib/std/atomic/Atomic.zig
@@ -232,6 +232,7 @@ test "Atomic.loadUnchecked" {
 
 test "Atomic.storeUnchecked" {
     inline for (atomicIntTypes()) |Int| {
+        _ = Int;
         var x = Atomic(usize).init(5);
         x.storeUnchecked(10);
         try testing.expectEqual(x.loadUnchecked(), 10);
@@ -250,6 +251,7 @@ test "Atomic.load" {
 test "Atomic.store" {
     inline for (atomicIntTypes()) |Int| {
         inline for (.{ .Unordered, .Monotonic, .Release, .SeqCst }) |ordering| {
+            _ = Int;
             var x = Atomic(usize).init(5);
             x.store(10, ordering);
             try testing.expectEqual(x.load(.SeqCst), 10);
lib/std/build/InstallRawStep.zig
@@ -139,6 +139,7 @@ const BinaryElfOutput = struct {
     }
 
     fn segmentSortCompare(context: void, left: *BinaryElfSegment, right: *BinaryElfSegment) bool {
+        _ = context;
         if (left.physicalAddress < right.physicalAddress) {
             return true;
         }
@@ -149,6 +150,7 @@ const BinaryElfOutput = struct {
     }
 
     fn sectionSortCompare(context: void, left: *BinaryElfSection, right: *BinaryElfSection) bool {
+        _ = context;
         return left.binaryOffset < right.binaryOffset;
     }
 };
lib/std/crypto/25519/ed25519.zig
@@ -346,7 +346,7 @@ test "ed25519 test vectors" {
             .expected = error.IdentityElement, // 11 - small-order A
         },
     };
-    for (entries) |entry, i| {
+    for (entries) |entry| {
         var msg: [entry.msg_hex.len / 2]u8 = undefined;
         _ = try fmt.hexToBytes(&msg, entry.msg_hex);
         var public_key: [32]u8 = undefined;
lib/std/crypto/pcurves/p256/scalar.zig
@@ -63,6 +63,7 @@ pub fn add(a: CompressedScalar, b: CompressedScalar, endian: builtin.Endian) Non
 
 /// Return -s (mod L)
 pub fn neg(s: CompressedScalar, endian: builtin.Endian) NonCanonicalError!CompressedScalar {
+    _ = s;
     return (try Scalar.fromBytes(a, endian)).neg().toBytes(endian);
 }
 
lib/std/crypto/blake3.zig
@@ -394,6 +394,7 @@ pub const Blake3 = struct {
     /// Construct a new `Blake3` for the key derivation function. The context
     /// string should be hardcoded, globally unique, and application-specific.
     pub fn initKdf(context: []const u8, options: KdfOptions) Blake3 {
+        _ = options;
         var context_hasher = Blake3.init_internal(IV, DERIVE_KEY_CONTEXT);
         context_hasher.update(context);
         var context_key: [KEY_LEN]u8 = undefined;
lib/std/crypto/gimli.zig
@@ -219,6 +219,7 @@ pub const Hash = struct {
     const Self = @This();
 
     pub fn init(options: Options) Self {
+        _ = options;
         return Self{
             .state = State{ .data = [_]u32{0} ** (State.BLOCKBYTES / 4) },
             .buf_off = 0,
lib/std/crypto/md5.zig
@@ -45,6 +45,7 @@ pub const Md5 = struct {
     total_len: u64,
 
     pub fn init(options: Options) Self {
+        _ = options;
         return Self{
             .s = [_]u32{
                 0x67452301,
lib/std/crypto/sha1.zig
@@ -43,6 +43,7 @@ pub const Sha1 = struct {
     total_len: u64 = 0,
 
     pub fn init(options: Options) Self {
+        _ = options;
         return Self{
             .s = [_]u32{
                 0x67452301,
lib/std/crypto/sha2.zig
@@ -95,6 +95,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
         total_len: u64 = 0,
 
         pub fn init(options: Options) Self {
+            _ = options;
             return Self{
                 .s = [_]u32{
                     params.iv0,
@@ -462,6 +463,7 @@ fn Sha2x64(comptime params: Sha2Params64) type {
         total_len: u128 = 0,
 
         pub fn init(options: Options) Self {
+            _ = options;
             return Self{
                 .s = [_]u64{
                     params.iv0,
lib/std/crypto/sha3.zig
@@ -28,6 +28,7 @@ fn Keccak(comptime bits: usize, comptime delim: u8) type {
         rate: usize,
 
         pub fn init(options: Options) Self {
+            _ = options;
             return Self{ .s = [_]u8{0} ** 200, .offset = 0, .rate = 200 - (bits / 4) };
         }
 
lib/std/crypto/tlcsprng.zig
@@ -84,7 +84,7 @@ fn tlsCsprngFill(_: *const std.rand.Random, buffer: []u8) void {
                 os.MAP_PRIVATE | os.MAP_ANONYMOUS,
                 -1,
                 0,
-            ) catch |err| {
+            ) catch {
                 // Could not allocate memory for the local state, fall back to
                 // the OS syscall.
                 return fillWithOsEntropy(buffer);
lib/std/event/loop.zig
@@ -345,7 +345,7 @@ pub const Loop = struct {
                 );
                 errdefer windows.CloseHandle(self.os_data.io_port);
 
-                for (self.eventfd_resume_nodes) |*eventfd_node, i| {
+                for (self.eventfd_resume_nodes) |*eventfd_node| {
                     eventfd_node.* = std.atomic.Stack(ResumeNode.EventFd).Node{
                         .data = ResumeNode.EventFd{
                             .base = ResumeNode{
lib/std/fs/path.zig
@@ -579,7 +579,7 @@ pub fn resolveWindows(allocator: *Allocator, paths: []const []const u8) ![]u8 {
     // Now we know the disk designator to use, if any, and what kind it is. And our result
     // is big enough to append all the paths to.
     var correct_disk_designator = true;
-    for (paths[first_index..]) |p, i| {
+    for (paths[first_index..]) |p| {
         const parsed = windowsParsePath(p);
 
         if (parsed.kind != WindowsPath.Kind.None) {
@@ -660,7 +660,7 @@ pub fn resolvePosix(allocator: *Allocator, paths: []const []const u8) ![]u8 {
     }
     errdefer allocator.free(result);
 
-    for (paths[first_index..]) |p, i| {
+    for (paths[first_index..]) |p| {
         var it = mem.tokenize(p, "/");
         while (it.next()) |component| {
             if (mem.eql(u8, component, ".")) {
lib/std/fs/test.zig
@@ -541,6 +541,7 @@ test "makePath, put some files in it, deleteTree" {
     try tmp.dir.writeFile("os_test_tmp" ++ fs.path.sep_str ++ "b" ++ fs.path.sep_str ++ "file2.txt", "blah");
     try tmp.dir.deleteTree("os_test_tmp");
     if (tmp.dir.openDir("os_test_tmp", .{})) |dir| {
+        _ = dir;
         @panic("expected error");
     } else |err| {
         try testing.expect(err == error.FileNotFound);
@@ -638,6 +639,7 @@ test "access file" {
 
     try tmp.dir.makePath("os_test_tmp");
     if (tmp.dir.access("os_test_tmp" ++ fs.path.sep_str ++ "file.txt", .{})) |ok| {
+        _ = ok;
         @panic("expected error");
     } else |err| {
         try testing.expect(err == error.FileNotFound);
lib/std/fs/wasi.zig
@@ -36,6 +36,8 @@ pub const PreopenType = union(PreopenTypeTag) {
     }
 
     pub fn format(self: Self, comptime fmt: []const u8, options: std.fmt.FormatOptions, out_stream: anytype) !void {
+        _ = fmt;
+        _ = options;
         try out_stream.print("PreopenType{{ ", .{});
         switch (self) {
             PreopenType.Dir => |path| try out_stream.print(".Dir = '{}'", .{std.zig.fmtId(path)}),
lib/std/hash/cityhash.zig
@@ -375,6 +375,7 @@ fn SMHasherTest(comptime hash_fn: anytype) u32 {
 }
 
 fn CityHash32hashIgnoreSeed(str: []const u8, seed: u32) u32 {
+    _ = seed;
     return CityHash32.hash(str);
 }
 
lib/std/heap/arena_allocator.zig
@@ -66,6 +66,8 @@ pub const ArenaAllocator = struct {
     }
 
     fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
+        _ = len_align;
+        _ = ra;
         const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
 
         var cur_node = if (self.state.buffer_list.first) |first_node| first_node else try self.createNode(0, n + ptr_align);
@@ -95,6 +97,9 @@ pub const ArenaAllocator = struct {
     }
 
     fn resize(allocator: *Allocator, buf: []u8, buf_align: u29, new_len: usize, len_align: u29, ret_addr: usize) Allocator.Error!usize {
+        _ = buf_align;
+        _ = len_align;
+        _ = ret_addr;
         const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
 
         const cur_node = self.state.buffer_list.first orelse return error.OutOfMemory;
lib/std/heap/log_to_writer_allocator.zig
@@ -37,9 +37,9 @@ pub fn LogToWriterAllocator(comptime Writer: type) type {
             const self = @fieldParentPtr(Self, "allocator", allocator);
             self.writer.print("alloc : {}", .{len}) catch {};
             const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ra);
-            if (result) |buff| {
+            if (result) |_| {
                 self.writer.print(" success!\n", .{}) catch {};
-            } else |err| {
+            } else |_| {
                 self.writer.print(" failure!\n", .{}) catch {};
             }
             return result;
lib/std/heap/logging_allocator.zig
@@ -65,7 +65,7 @@ pub fn ScopedLoggingAllocator(
         ) error{OutOfMemory}![]u8 {
             const self = @fieldParentPtr(Self, "allocator", allocator);
             const result = self.parent_allocator.allocFn(self.parent_allocator, len, ptr_align, len_align, ra);
-            if (result) |buff| {
+            if (result) |_| {
                 logHelper(
                     success_log_level,
                     "alloc - success - len: {}, ptr_align: {}, len_align: {}",
lib/std/io/bit_reader.zig
@@ -149,7 +149,7 @@ pub fn BitReader(endian: builtin.Endian, comptime ReaderType: type) type {
             var out_bits_total = @as(usize, 0);
             //@NOTE: I'm not sure this is a good idea, maybe alignToByte should be forced
             if (self.bit_count > 0) {
-                for (buffer) |*b, i| {
+                for (buffer) |*b| {
                     b.* = try self.readBits(u8, u8_bit_count, &out_bits);
                     out_bits_total += out_bits;
                 }
lib/std/io/bit_writer.zig
@@ -128,7 +128,7 @@ pub fn BitWriter(endian: builtin.Endian, comptime WriterType: type) type {
         pub fn write(self: *Self, buffer: []const u8) Error!usize {
             // TODO: I'm not sure this is a good idea, maybe flushBits should be forced
             if (self.bit_count > 0) {
-                for (buffer) |b, i|
+                for (buffer) |b|
                     try self.writeBits(b, u8_bit_count);
                 return buffer.len;
             }
lib/std/math/big/int.zig
@@ -458,6 +458,7 @@ pub const Mutable = struct {
     /// If `allocator` is provided, it will be used for temporary storage to improve
     /// multiplication performance. `error.OutOfMemory` is handled with a fallback algorithm.
     pub fn sqrNoAlias(rma: *Mutable, a: Const, opt_allocator: ?*Allocator) void {
+        _ = opt_allocator;
         assert(rma.limbs.ptr != a.limbs.ptr); // illegal aliasing
 
         mem.set(Limb, rma.limbs, 0);
@@ -676,6 +677,7 @@ pub const Mutable = struct {
     ///
     /// `limbs_buffer` is used for temporary storage during the operation.
     pub fn gcdNoAlias(rma: *Mutable, x: Const, y: Const, limbs_buffer: *std.ArrayList(Limb)) !void {
+        _ = limbs_buffer;
         assert(rma.limbs.ptr != x.limbs.ptr); // illegal aliasing
         assert(rma.limbs.ptr != y.limbs.ptr); // illegal aliasing
         return gcdLehmer(rma, x, y, allocator);
@@ -1141,6 +1143,7 @@ pub const Const = struct {
         options: std.fmt.FormatOptions,
         out_stream: anytype,
     ) !void {
+        _ = options;
         comptime var radix = 10;
         comptime var case: std.fmt.Case = .lower;
 
@@ -1618,6 +1621,7 @@ pub const Managed = struct {
     /// Converts self to a string in the requested base. Memory is allocated from the provided
     /// allocator and not the one present in self.
     pub fn toString(self: Managed, allocator: *Allocator, base: u8, case: std.fmt.Case) ![]u8 {
+        _ = allocator;
         if (base < 2 or base > 16) return error.InvalidBase;
         return self.toConst().toStringAlloc(self.allocator, base, case);
     }
lib/std/mem/Allocator.zig
@@ -55,6 +55,10 @@ pub fn noResize(
     len_align: u29,
     ret_addr: usize,
 ) Error!usize {
+    _ = self;
+    _ = buf_align;
+    _ = len_align;
+    _ = ret_addr;
     if (new_len > buf.len)
         return error.OutOfMemory;
     return new_len;
lib/std/meta/trailer_flags.zig
@@ -108,6 +108,7 @@ pub fn TrailerFlags(comptime Fields: type) type {
         }
 
         pub fn offset(self: Self, p: [*]align(@alignOf(Fields)) const u8, comptime field: FieldEnum) usize {
+            _ = p;
             var off: usize = 0;
             inline for (@typeInfo(Fields).Struct.fields) |field_info, i| {
                 const active = (self.bits & (1 << i)) != 0;
lib/std/os/bits/linux.zig
@@ -1286,7 +1286,7 @@ pub const CAP_BLOCK_SUSPEND = 36;
 pub const CAP_AUDIT_READ = 37;
 pub const CAP_LAST_CAP = CAP_AUDIT_READ;
 
-pub fn cap_valid(u8: x) bool {
+pub fn cap_valid(x: u8) bool {
     return x >= 0 and x <= CAP_LAST_CAP;
 }
 
lib/std/os/linux/bpf.zig
@@ -1513,7 +1513,7 @@ pub fn map_create(map_type: MapType, key_size: u32, value_size: u32, max_entries
         EINVAL => error.MapTypeOrAttrInvalid,
         ENOMEM => error.SystemResources,
         EPERM => error.AccessDenied,
-        else => |err| unexpectedErrno(rc),
+        else => unexpectedErrno(rc),
     };
 }
 
@@ -1539,7 +1539,7 @@ pub fn map_lookup_elem(fd: fd_t, key: []const u8, value: []u8) !void {
         EINVAL => return error.FieldInAttrNeedsZeroing,
         ENOENT => return error.NotFound,
         EPERM => return error.AccessDenied,
-        else => |err| return unexpectedErrno(rc),
+        else => return unexpectedErrno(rc),
     }
 }
 
lib/std/os/linux/io_uring.zig
@@ -284,6 +284,7 @@ pub const IO_Uring = struct {
     }
 
     fn copy_cqes_ready(self: *IO_Uring, cqes: []io_uring_cqe, wait_nr: u32) u32 {
+        _ = wait_nr;
         const ready = self.cq_ready();
         const count = std.math.min(cqes.len, ready);
         var head = self.cq.head.*;
@@ -320,6 +321,7 @@ pub const IO_Uring = struct {
     /// Not idempotent, calling more than once will result in other CQEs being lost.
     /// Matches the implementation of cqe_seen() in liburing.
     pub fn cqe_seen(self: *IO_Uring, cqe: *io_uring_cqe) void {
+        _ = cqe;
         self.cq_advance(1);
     }
 
@@ -728,6 +730,7 @@ pub const CompletionQueue = struct {
     }
 
     pub fn deinit(self: *CompletionQueue) void {
+        _ = self;
         // A no-op since we now share the mmap with the submission queue.
         // Here for symmetry with the submission queue, and for any future feature support.
     }
lib/std/os/linux/mips.zig
@@ -18,6 +18,7 @@ pub fn syscall0(number: SYS) usize {
 }
 
 pub fn syscall_pipe(fd: *[2]i32) usize {
+    _ = fd;
     return asm volatile (
         \\ .set noat
         \\ .set noreorder
lib/std/os/uefi/protocols/managed_network_protocol.zig
@@ -35,6 +35,7 @@ pub const ManagedNetworkProtocol = extern struct {
     /// Translates an IP multicast address to a hardware (MAC) multicast address.
     /// This function may be unsupported in some MNP implementations.
     pub fn mcastIpToMac(self: *const ManagedNetworkProtocol, ipv6flag: bool, ipaddress: *const c_void, mac_address: *MacAddress) Status {
+        _ = mac_address;
         return self._mcast_ip_to_mac(self, ipv6flag, ipaddress);
     }
 
lib/std/os/linux.zig
@@ -70,6 +70,7 @@ fn splitValueLE64(val: i64) [2]u32 {
     };
 }
 fn splitValueBE64(val: i64) [2]u32 {
+    _ = val;
     return [2]u32{
         @truncate(u32, u >> 32),
         @truncate(u32, u),
@@ -1022,7 +1023,7 @@ pub fn sendmmsg(fd: i32, msgvec: [*]mmsghdr_const, vlen: u32, flags: u32) usize
         for (msgvec[0..kvlen]) |*msg, i| {
             var size: i32 = 0;
             const msg_iovlen = @intCast(usize, msg.msg_hdr.msg_iovlen); // kernel side this is treated as unsigned
-            for (msg.msg_hdr.msg_iov[0..msg_iovlen]) |iov, j| {
+            for (msg.msg_hdr.msg_iov[0..msg_iovlen]) |iov| {
                 if (iov.iov_len > std.math.maxInt(i32) or @addWithOverflow(i32, size, @intCast(i32, iov.iov_len), &size)) {
                     // batch-send all messages up to the current message
                     if (next_unsent < i) {
lib/std/os/test.zig
@@ -353,6 +353,7 @@ test "spawn threads" {
 }
 
 fn start1(ctx: void) u8 {
+    _ = ctx;
     return 0;
 }
 
@@ -379,6 +380,7 @@ test "thread local storage" {
 
 threadlocal var x: i32 = 1234;
 fn testTls(context: void) !void {
+    _ = context;
     if (x != 1234) return error.TlsBadStartValue;
     x += 1;
     if (x != 1235) return error.TlsBadEndValue;
@@ -425,6 +427,7 @@ const IterFnError = error{
 };
 
 fn iter_fn(info: *dl_phdr_info, size: usize, counter: *usize) IterFnError!void {
+    _ = size;
     // Count how many libraries are loaded
     counter.* += @as(usize, 1);
 
@@ -731,6 +734,7 @@ test "sigaction" {
 
     const S = struct {
         fn handler(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const c_void) callconv(.C) void {
+            _ = ctx_ptr;
             // Check that we received the correct signal.
             switch (native_os) {
                 .netbsd => {
lib/std/os/uefi.zig
@@ -37,6 +37,7 @@ pub const Guid = extern struct {
         options: std.fmt.FormatOptions,
         writer: anytype,
     ) !void {
+        _ = options;
         if (f.len == 0) {
             return std.fmt.format(writer, "{x:0>8}-{x:0>4}-{x:0>4}-{x:0>2}{x:0>2}-{x:0>12}", .{
                 self.time_low,
lib/std/special/compiler_rt/atomics.zig
@@ -80,18 +80,21 @@ var spinlocks: SpinlockTable = SpinlockTable{};
 // Those work on any object no matter the pointer alignment nor its size.
 
 fn __atomic_load(size: u32, src: [*]u8, dest: [*]u8, model: i32) callconv(.C) void {
+    _ = model;
     var sl = spinlocks.get(@ptrToInt(src));
     defer sl.release();
     @memcpy(dest, src, size);
 }
 
 fn __atomic_store(size: u32, dest: [*]u8, src: [*]u8, model: i32) callconv(.C) void {
+    _ = model;
     var sl = spinlocks.get(@ptrToInt(dest));
     defer sl.release();
     @memcpy(dest, src, size);
 }
 
 fn __atomic_exchange(size: u32, ptr: [*]u8, val: [*]u8, old: [*]u8, model: i32) callconv(.C) void {
+    _ = model;
     var sl = spinlocks.get(@ptrToInt(ptr));
     defer sl.release();
     @memcpy(old, ptr, size);
@@ -106,6 +109,8 @@ fn __atomic_compare_exchange(
     success: i32,
     failure: i32,
 ) callconv(.C) i32 {
+    _ = success;
+    _ = failure;
     var sl = spinlocks.get(@ptrToInt(ptr));
     defer sl.release();
     for (ptr[0..size]) |b, i| {
@@ -135,6 +140,7 @@ comptime {
 fn atomicLoadFn(comptime T: type) fn (*T, i32) callconv(.C) T {
     return struct {
         fn atomic_load_N(src: *T, model: i32) callconv(.C) T {
+            _ = model;
             if (@sizeOf(T) > largest_atomic_size) {
                 var sl = spinlocks.get(@ptrToInt(src));
                 defer sl.release();
@@ -162,6 +168,7 @@ comptime {
 fn atomicStoreFn(comptime T: type) fn (*T, T, i32) callconv(.C) void {
     return struct {
         fn atomic_store_N(dst: *T, value: T, model: i32) callconv(.C) void {
+            _ = model;
             if (@sizeOf(T) > largest_atomic_size) {
                 var sl = spinlocks.get(@ptrToInt(dst));
                 defer sl.release();
@@ -189,6 +196,7 @@ comptime {
 fn atomicExchangeFn(comptime T: type) fn (*T, T, i32) callconv(.C) T {
     return struct {
         fn atomic_exchange_N(ptr: *T, val: T, model: i32) callconv(.C) T {
+            _ = model;
             if (@sizeOf(T) > largest_atomic_size) {
                 var sl = spinlocks.get(@ptrToInt(ptr));
                 defer sl.release();
@@ -218,6 +226,8 @@ comptime {
 fn atomicCompareExchangeFn(comptime T: type) fn (*T, *T, T, i32, i32) callconv(.C) i32 {
     return struct {
         fn atomic_compare_exchange_N(ptr: *T, expected: *T, desired: T, success: i32, failure: i32) callconv(.C) i32 {
+            _ = success;
+            _ = failure;
             if (@sizeOf(T) > largest_atomic_size) {
                 var sl = spinlocks.get(@ptrToInt(ptr));
                 defer sl.release();
@@ -255,6 +265,7 @@ comptime {
 fn fetchFn(comptime T: type, comptime op: builtin.AtomicRmwOp) fn (*T, T, i32) callconv(.C) T {
     return struct {
         pub fn fetch_op_N(ptr: *T, val: T, model: i32) callconv(.C) T {
+            _ = model;
             if (@sizeOf(T) > largest_atomic_size) {
                 var sl = spinlocks.get(@ptrToInt(ptr));
                 defer sl.release();
lib/std/special/compiler_rt/comparedf2_test.zig
@@ -101,6 +101,7 @@ const test_vectors = init: {
 
 test "compare f64" {
     for (test_vectors) |vector, i| {
+        _ = i;
         try std.testing.expect(test__cmpdf2(vector));
     }
 }
lib/std/special/compiler_rt/comparesf2_test.zig
@@ -101,6 +101,7 @@ const test_vectors = init: {
 
 test "compare f32" {
     for (test_vectors) |vector, i| {
+        _ = i;
         try std.testing.expect(test__cmpsf2(vector));
     }
 }
lib/std/special/c.zig
@@ -160,6 +160,7 @@ fn strncmp(_l: [*:0]const u8, _r: [*:0]const u8, _n: usize) callconv(.C) c_int {
 }
 
 fn strerror(errnum: c_int) callconv(.C) [*:0]const u8 {
+    _ = errnum;
     return "TODO strerror implementation";
 }
 
@@ -173,6 +174,7 @@ test "strncmp" {
 // Avoid dragging in the runtime safety mechanisms into this .o file,
 // unless we're trying to test this file.
 pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn {
+    _ = error_return_trace;
     if (builtin.is_test) {
         @setCold(true);
         std.debug.panic("{s}", .{msg});
lib/std/special/compiler_rt.zig
@@ -602,6 +602,7 @@ pub usingnamespace @import("compiler_rt/atomics.zig");
 // Avoid dragging in the runtime safety mechanisms into this .o file,
 // unless we're trying to test this file.
 pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn {
+    _ = error_return_trace;
     @setCold(true);
     if (is_test) {
         std.debug.panic("{s}", .{msg});
lib/std/special/ssp.zig
@@ -27,6 +27,8 @@ extern fn memmove(dest: ?[*]u8, src: ?[*]const u8, n: usize) callconv(.C) ?[*]u8
 
 // Avoid dragging in the runtime safety mechanisms into this .o file.
 pub fn panic(msg: []const u8, error_return_trace: ?*builtin.StackTrace) noreturn {
+    _ = msg;
+    _ = error_return_trace;
     @setCold(true);
     if (@hasDecl(std.os, "abort"))
         std.os.abort();
lib/std/Thread/Condition.zig
@@ -40,12 +40,18 @@ else
 
 pub const SingleThreadedCondition = struct {
     pub fn wait(cond: *SingleThreadedCondition, mutex: *Mutex) void {
+        _ = cond;
+        _ = mutex;
         unreachable; // deadlock detected
     }
 
-    pub fn signal(cond: *SingleThreadedCondition) void {}
+    pub fn signal(cond: *SingleThreadedCondition) void {
+        _ = cond;
+    }
 
-    pub fn broadcast(cond: *SingleThreadedCondition) void {}
+    pub fn broadcast(cond: *SingleThreadedCondition) void {
+        _ = cond;
+    }
 };
 
 pub const WindowsCondition = struct {
lib/std/Thread/StaticResetEvent.zig
@@ -105,6 +105,7 @@ pub const DebugEvent = struct {
     }
 
     pub fn timedWait(ev: *DebugEvent, timeout: u64) TimedWaitResult {
+        _ = timeout;
         switch (ev.state) {
             .unset => return .timed_out,
             .set => return .event_set,
@@ -174,7 +175,10 @@ pub const AtomicEvent = struct {
     };
 
     pub const SpinFutex = struct {
-        fn wake(waiters: *u32, wake_count: u32) void {}
+        fn wake(waiters: *u32, wake_count: u32) void {
+            _ = waiters;
+            _ = wake_count;
+        }
 
         fn wait(waiters: *u32, timeout: ?u64) !void {
             var timer: time.Timer = undefined;
@@ -193,6 +197,7 @@ pub const AtomicEvent = struct {
 
     pub const LinuxFutex = struct {
         fn wake(waiters: *u32, wake_count: u32) void {
+            _ = wake_count;
             const waiting = std.math.maxInt(i32); // wake_count
             const ptr = @ptrCast(*const i32, waiters);
             const rc = linux.futex_wake(ptr, linux.FUTEX_WAKE | linux.FUTEX_PRIVATE_FLAG, waiting);
lib/std/x/net/ip.zig
@@ -53,6 +53,8 @@ pub const Address = union(enum) {
         opts: fmt.FormatOptions,
         writer: anytype,
     ) !void {
+        _ = opts;
+        _ = layout;
         switch (self) {
             .ipv4 => |address| try fmt.format(writer, "{}:{}", .{ address.host, address.port }),
             .ipv6 => |address| try fmt.format(writer, "{}:{}", .{ address.host, address.port }),
lib/std/x/os/net.zig
@@ -143,6 +143,7 @@ pub const IPv4 = extern struct {
         opts: fmt.FormatOptions,
         writer: anytype,
     ) !void {
+        _ = opts;
         if (comptime layout.len != 0 and layout[0] != 's') {
             @compileError("Unsupported format specifier for IPv4 type '" ++ layout ++ "'.");
         }
@@ -352,6 +353,7 @@ pub const IPv6 = extern struct {
         opts: fmt.FormatOptions,
         writer: anytype,
     ) !void {
+        _ = opts;
         const specifier = comptime &[_]u8{if (layout.len == 0) 'x' else switch (layout[0]) {
             'x', 'X' => |specifier| specifier,
             's' => 'x',
lib/std/x/os/socket.zig
@@ -117,7 +117,7 @@ pub const Socket = struct {
             };
         }
 
-        /// Returns the number of bytes that make up the `sockaddr` equivalent to the address. 
+        /// Returns the number of bytes that make up the `sockaddr` equivalent to the address.
         pub fn getNativeSize(self: Socket.Address) u32 {
             return switch (self) {
                 .ipv4 => @sizeOf(os.sockaddr_in),
@@ -132,6 +132,8 @@ pub const Socket = struct {
             opts: fmt.FormatOptions,
             writer: anytype,
         ) !void {
+            _ = opts;
+            _ = layout;
             switch (self) {
                 .ipv4 => |address| try fmt.format(writer, "{}:{}", .{ address.host, address.port }),
                 .ipv6 => |address| try fmt.format(writer, "{}:{}", .{ address.host, address.port }),
@@ -280,7 +282,7 @@ pub const Socket = struct {
     ///
     /// Microsoft's documentation and glibc denote the fields to be unsigned
     /// short's on Windows, whereas glibc and musl denote the fields to be
-    /// int's on every other platform. 
+    /// int's on every other platform.
     pub const Linger = extern struct {
         pub const Field = switch (native_os.tag) {
             .windows => c_ushort,
lib/std/x/os/socket_windows.zig
@@ -292,6 +292,7 @@ pub fn Mixin(comptime Socket: type) type {
         /// with a set of flags specified. It returns the number of bytes that were
         /// read into the buffer provided.
         pub fn readMessage(self: Socket, msg: *Socket.Message, flags: u32) !usize {
+            _ = flags;
             const call = try windows.loadWinsockExtensionFunction(ws2_32.LPFN_WSARECVMSG, self.fd, ws2_32.WSAID_WSARECVMSG);
 
             var num_bytes: u32 = undefined;
@@ -367,16 +368,19 @@ pub fn Mixin(comptime Socket: type) type {
 
         /// Query and return the latest cached error on the socket.
         pub fn getError(self: Socket) !void {
+            _ = self;
             return {};
         }
 
         /// Query the read buffer size of the socket.
         pub fn getReadBufferSize(self: Socket) !u32 {
+            _ = self;
             return 0;
         }
 
         /// Query the write buffer size of the socket.
         pub fn getWriteBufferSize(self: Socket) !u32 {
+            _ = self;
             return 0;
         }
 
@@ -406,7 +410,7 @@ pub fn Mixin(comptime Socket: type) type {
 
         /// On connection-oriented sockets, have keep-alive messages be sent periodically. The timing in which keep-alive
         /// messages are sent are dependant on operating system settings. It returns `error.UnsupportedSocketOption` if
-        /// the host does not support periodically sending keep-alive messages on connection-oriented sockets. 
+        /// the host does not support periodically sending keep-alive messages on connection-oriented sockets.
         pub fn setKeepAlive(self: Socket, enabled: bool) !void {
             return self.setOption(ws2_32.SOL_SOCKET, ws2_32.SO_KEEPALIVE, mem.asBytes(&@as(u32, @boolToInt(enabled))));
         }
@@ -438,7 +442,7 @@ pub fn Mixin(comptime Socket: type) type {
 
         /// WARNING: Timeouts only affect blocking sockets. It is undefined behavior if a timeout is
         /// set on a non-blocking socket.
-        /// 
+        ///
         /// Set a timeout on the socket that is to occur if no messages are successfully written
         /// to its bound destination after a specified number of milliseconds. A subsequent write
         /// to the socket will thereafter return `error.WouldBlock` should the timeout be exceeded.
@@ -448,7 +452,7 @@ pub fn Mixin(comptime Socket: type) type {
 
         /// WARNING: Timeouts only affect blocking sockets. It is undefined behavior if a timeout is
         /// set on a non-blocking socket.
-        /// 
+        ///
         /// Set a timeout on the socket that is to occur if no messages are successfully read
         /// from its bound destination after a specified number of milliseconds. A subsequent
         /// read from the socket will thereafter return `error.WouldBlock` should the timeout be
lib/std/zig/system/macos.zig
@@ -68,10 +68,10 @@ pub fn detect(target_os: *Target.Os) !void {
                     return;
                 }
                 continue;
-            } else |err| {
+            } else |_| {
                 return error.OSVersionDetectionFail;
             }
-        } else |err| {
+        } else |_| {
             return error.OSVersionDetectionFail;
         }
     }
lib/std/zig/system/x86.zig
@@ -28,6 +28,7 @@ inline fn hasMask(input: u32, mask: u32) bool {
 }
 
 pub fn detectNativeCpuAndFeatures(arch: Target.Cpu.Arch, os: Target.Os, cross_target: CrossTarget) Target.Cpu {
+    _ = cross_target;
     var cpu = Target.Cpu{
         .arch = arch,
         .model = Target.Cpu.Model.generic(arch),
lib/std/zig/ast.zig
@@ -1866,6 +1866,7 @@ pub const Tree = struct {
     }
 
     fn fullStructInit(tree: Tree, info: full.StructInit.Ast) full.StructInit {
+        _ = tree;
         var result: full.StructInit = .{
             .ast = info,
         };
lib/std/zig/c_builtins.zig
@@ -136,6 +136,7 @@ pub inline fn __builtin_strcmp(s1: [*c]const u8, s2: [*c]const u8) c_int {
 }
 
 pub inline fn __builtin_object_size(ptr: ?*const c_void, ty: c_int) usize {
+    _ = ptr;
     // clang semantics match gcc's: https://gcc.gnu.org/onlinedocs/gcc/Object-Size-Checking.html
     // If it is not possible to determine which objects ptr points to at compile time,
     // __builtin_object_size should return (size_t) -1 for type 0 or 1 and (size_t) 0
@@ -186,6 +187,7 @@ pub inline fn __builtin_memcpy(
 /// The return value of __builtin_expect is `expr`. `c` is the expected value
 /// of `expr` and is used as a hint to the compiler in C. Here it is unused.
 pub inline fn __builtin_expect(expr: c_long, c: c_long) c_long {
+    _ = c;
     return expr;
 }
 
lib/std/zig/fmt.zig
@@ -8,6 +8,7 @@ pub fn formatId(
     options: std.fmt.FormatOptions,
     writer: anytype,
 ) !void {
+    _ = fmt;
     if (isValidId(bytes)) {
         return writer.writeAll(bytes);
     }
@@ -41,6 +42,7 @@ pub fn formatEscapes(
     options: std.fmt.FormatOptions,
     writer: anytype,
 ) !void {
+    _ = options;
     for (bytes) |byte| switch (byte) {
         '\n' => try writer.writeAll("\\n"),
         '\r' => try writer.writeAll("\\r"),
lib/std/zig/parse.zig
@@ -2104,7 +2104,7 @@ const Parser = struct {
     /// FnCallArguments <- LPAREN ExprList RPAREN
     /// ExprList <- (Expr COMMA)* Expr?
     fn parseSuffixExpr(p: *Parser) !Node.Index {
-        if (p.eatToken(.keyword_async)) |async_token| {
+        if (p.eatToken(.keyword_async)) |_| {
             var res = try p.expectPrimaryTypeExpr();
             while (true) {
                 const node = try p.parseSuffixOp(res);
lib/std/zig/system.zig
@@ -200,6 +200,7 @@ pub const NativePaths = struct {
     }
 
     fn appendArray(self: *NativePaths, array: *ArrayList([:0]u8), s: []const u8) !void {
+        _ = self;
         const item = try array.allocator.dupeZ(u8, s);
         errdefer array.allocator.free(item);
         try array.append(item);
@@ -332,7 +333,7 @@ pub const NativeTargetInfo = struct {
                     if (std.builtin.Version.parse(buf[0 .. len - 1])) |ver| {
                         os.version_range.semver.min = ver;
                         os.version_range.semver.max = ver;
-                    } else |err| {
+                    } else |_| {
                         return error.OSVersionDetectionFail;
                     }
                 },
lib/std/array_hash_map.zig
@@ -40,9 +40,11 @@ pub fn StringArrayHashMapUnmanaged(comptime V: type) type {
 
 pub const StringContext = struct {
     pub fn hash(self: @This(), s: []const u8) u32 {
+        _ = self;
         return hashString(s);
     }
     pub fn eql(self: @This(), a: []const u8, b: []const u8) bool {
+        _ = self;
         return eqlString(a, b);
     }
 };
@@ -1335,6 +1337,7 @@ pub fn ArrayHashMapUnmanaged(
         }
 
         fn removeSlot(self: *Self, removed_slot: usize, header: *IndexHeader, comptime I: type, indexes: []Index(I)) void {
+            _ = self;
             const start_index = removed_slot +% 1;
             const end_index = start_index +% indexes.len;
 
@@ -1626,6 +1629,7 @@ pub fn ArrayHashMapUnmanaged(
             }
         }
         fn dumpIndex(self: Self, header: *IndexHeader, comptime I: type) void {
+            _ = self;
             const p = std.debug.print;
             p("  index len=0x{x} type={}\n", .{ header.length(), header.capacityIndexType() });
             const indexes = header.indexes(I);
@@ -1918,7 +1922,7 @@ test "iterator hash map" {
     try testing.expect(count == 3);
     try testing.expect(it.next() == null);
 
-    for (buffer) |v, i| {
+    for (buffer) |_, i| {
         try testing.expect(buffer[@intCast(usize, keys[i])] == values[i]);
     }
 
@@ -1930,7 +1934,7 @@ test "iterator hash map" {
         if (count >= 2) break;
     }
 
-    for (buffer[0..2]) |v, i| {
+    for (buffer[0..2]) |_, i| {
         try testing.expect(buffer[@intCast(usize, keys[i])] == values[i]);
     }
 
@@ -2154,6 +2158,7 @@ test "compile everything" {
 pub fn getHashPtrAddrFn(comptime K: type, comptime Context: type) (fn (Context, K) u32) {
     return struct {
         fn hash(ctx: Context, key: K) u32 {
+            _ = ctx;
             return getAutoHashFn(usize, void)({}, @ptrToInt(key));
         }
     }.hash;
@@ -2162,6 +2167,7 @@ pub fn getHashPtrAddrFn(comptime K: type, comptime Context: type) (fn (Context,
 pub fn getTrivialEqlFn(comptime K: type, comptime Context: type) (fn (Context, K, K) bool) {
     return struct {
         fn eql(ctx: Context, a: K, b: K) bool {
+            _ = ctx;
             return a == b;
         }
     }.eql;
@@ -2177,6 +2183,7 @@ pub fn AutoContext(comptime K: type) type {
 pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K) u32) {
     return struct {
         fn hash(ctx: Context, key: K) u32 {
+            _ = ctx;
             if (comptime trait.hasUniqueRepresentation(K)) {
                 return @truncate(u32, Wyhash.hash(0, std.mem.asBytes(&key)));
             } else {
@@ -2191,6 +2198,7 @@ pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K)
 pub fn getAutoEqlFn(comptime K: type, comptime Context: type) (fn (Context, K, K) bool) {
     return struct {
         fn eql(ctx: Context, a: K, b: K) bool {
+            _ = ctx;
             return meta.eql(a, b);
         }
     }.eql;
@@ -2217,6 +2225,7 @@ pub fn autoEqlIsCheap(comptime K: type) bool {
 pub fn getAutoHashStratFn(comptime K: type, comptime Context: type, comptime strategy: std.hash.Strategy) (fn (Context, K) u32) {
     return struct {
         fn hash(ctx: Context, key: K) u32 {
+            _ = ctx;
             var hasher = Wyhash.init(0);
             std.hash.autoHashStrat(&hasher, key, strategy);
             return @truncate(u32, hasher.final());
lib/std/bit_set.zig
@@ -84,6 +84,7 @@ pub fn IntegerBitSet(comptime size: u16) type {
 
         /// Returns the number of bits in this bit set
         pub inline fn capacity(self: Self) usize {
+            _ = self;
             return bit_length;
         }
 
@@ -311,6 +312,7 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
 
         /// Returns the number of bits in this bit set
         pub inline fn capacity(self: Self) usize {
+            _ = self;
             return bit_length;
         }
 
@@ -373,7 +375,7 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
 
         /// Flips every bit in the bit set.
         pub fn toggleAll(self: *Self) void {
-            for (self.masks) |*mask, i| {
+            for (self.masks) |*mask| {
                 mask.* = ~mask.*;
             }
 
@@ -642,7 +644,7 @@ pub const DynamicBitSetUnmanaged = struct {
         if (bit_length == 0) return;
 
         const num_masks = numMasks(self.bit_length);
-        for (self.masks[0..num_masks]) |*mask, i| {
+        for (self.masks[0..num_masks]) |*mask| {
             mask.* = ~mask.*;
         }
 
lib/std/build.zig
@@ -390,6 +390,7 @@ pub const Builder = struct {
     }
 
     pub fn version(self: *const Builder, major: u32, minor: u32, patch: u32) LibExeObjStep.SharedLibKind {
+        _ = self;
         return .{
             .versioned = .{
                 .major = major,
@@ -543,7 +544,7 @@ pub const Builder = struct {
                     return null;
                 },
                 .scalar => |s| {
-                    const n = std.fmt.parseFloat(T, s) catch |err| {
+                    const n = std.fmt.parseFloat(T, s) catch {
                         warn("Expected -D{s} to be a float of type {s}.\n\n", .{ name, @typeName(T) });
                         self.markInvalidUserInput();
                         return null;
@@ -3129,7 +3130,9 @@ pub const Step = struct {
         self.dependencies.append(other) catch unreachable;
     }
 
-    fn makeNoOp(self: *Step) anyerror!void {}
+    fn makeNoOp(self: *Step) anyerror!void {
+        _ = self;
+    }
 
     pub fn cast(step: *Step, comptime T: type) ?*T {
         if (step.id == T.base_id) {
lib/std/builtin.zig
@@ -65,6 +65,8 @@ pub const StackTrace = struct {
         options: std.fmt.FormatOptions,
         writer: anytype,
     ) !void {
+        _ = fmt;
+        _ = options;
         var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
         defer arena.deinit();
         const debug_info = std.debug.getSelfDebugInfo() catch |err| {
@@ -521,6 +523,7 @@ pub const Version = struct {
         options: std.fmt.FormatOptions,
         out_stream: anytype,
     ) !void {
+        _ = options;
         if (fmt.len == 0) {
             if (self.patch == 0) {
                 if (self.minor == 0) {
lib/std/comptime_string_map.zig
@@ -23,6 +23,7 @@ pub fn ComptimeStringMap(comptime V: type, comptime kvs: anytype) type {
         var sorted_kvs: [kvs.len]KV = undefined;
         const lenAsc = (struct {
             fn lenAsc(context: void, a: KV, b: KV) bool {
+                _ = context;
                 return a.key.len < b.key.len;
             }
         }).lenAsc;
lib/std/debug.zig
@@ -325,6 +325,7 @@ pub fn writeStackTrace(
     debug_info: *DebugInfo,
     tty_config: TTY.Config,
 ) !void {
+    _ = allocator;
     if (builtin.strip_debug_info) return error.MissingDebugInfo;
     var frame_index: usize = 0;
     var frames_left: usize = std.math.min(stack_trace.index, stack_trace.instruction_addresses.len);
@@ -930,6 +931,7 @@ const MachoSymbol = struct {
     }
 
     fn addressLessThan(context: void, lhs: MachoSymbol, rhs: MachoSymbol) bool {
+        _ = context;
         return lhs.address() < rhs.address();
     }
 };
@@ -1134,6 +1136,7 @@ pub const DebugInfo = struct {
 
         if (os.dl_iterate_phdr(&ctx, anyerror, struct {
             fn callback(info: *os.dl_phdr_info, size: usize, context: *CtxTy) !void {
+                _ = size;
                 // The base address is too high
                 if (context.address < info.dlpi_addr)
                     return;
@@ -1189,6 +1192,8 @@ pub const DebugInfo = struct {
     }
 
     fn lookupModuleHaiku(self: *DebugInfo, address: usize) !*ModuleDebugInfo {
+        _ = self;
+        _ = address;
         @panic("TODO implement lookup module for Haiku");
     }
 };
lib/std/dwarf.zig
@@ -283,6 +283,7 @@ fn parseFormValueBlock(allocator: *mem.Allocator, in_stream: anytype, endian: bu
 }
 
 fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: anytype, signed: bool, endian: builtin.Endian, comptime size: i32) !FormValue {
+    _ = allocator;
     // TODO: Please forgive me, I've worked around zig not properly spilling some intermediate values here.
     // `nosuspend` should be removed from all the function calls once it is fixed.
     return FormValue{
@@ -310,6 +311,7 @@ fn parseFormValueConstant(allocator: *mem.Allocator, in_stream: anytype, signed:
 
 // TODO the nosuspends here are workarounds
 fn parseFormValueRef(allocator: *mem.Allocator, in_stream: anytype, endian: builtin.Endian, size: i32) !FormValue {
+    _ = allocator;
     return FormValue{
         .Ref = switch (size) {
             1 => try nosuspend in_stream.readInt(u8, endian),
@@ -453,13 +455,13 @@ pub const DwarfInfo = struct {
                                 if (this_die_obj.getAttr(AT_name)) |_| {
                                     const name = try this_die_obj.getAttrString(di, AT_name);
                                     break :x name;
-                                } else if (this_die_obj.getAttr(AT_abstract_origin)) |ref| {
+                                } else if (this_die_obj.getAttr(AT_abstract_origin)) |_| {
                                     // Follow the DIE it points to and repeat
                                     const ref_offset = try this_die_obj.getAttrRef(AT_abstract_origin);
                                     if (ref_offset > next_offset) return error.InvalidDebugInfo;
                                     try seekable.seekTo(this_unit_offset + ref_offset);
                                     this_die_obj = (try di.parseDie(in, abbrev_table, is_64)) orelse return error.InvalidDebugInfo;
-                                } else if (this_die_obj.getAttr(AT_specification)) |ref| {
+                                } else if (this_die_obj.getAttr(AT_specification)) |_| {
                                     // Follow the DIE it points to and repeat
                                     const ref_offset = try this_die_obj.getAttrRef(AT_specification);
                                     if (ref_offset > next_offset) return error.InvalidDebugInfo;
lib/std/dynamic_library.zig
@@ -66,6 +66,7 @@ pub fn get_DYNAMIC() ?[*]elf.Dyn {
 }
 
 pub fn linkmap_iterator(phdrs: []elf.Phdr) !LinkMap.Iterator {
+    _ = phdrs;
     const _DYNAMIC = get_DYNAMIC() orelse {
         // No PT_DYNAMIC means this is either a statically-linked program or a
         // badly corrupted dynamically-linked one.
lib/std/enums.zig
@@ -18,7 +18,7 @@ const EnumField = std.builtin.TypeInfo.EnumField;
 pub fn EnumFieldStruct(comptime E: type, comptime Data: type, comptime field_default: ?Data) type {
     const StructField = std.builtin.TypeInfo.StructField;
     var fields: []const StructField = &[_]StructField{};
-    for (std.meta.fields(E)) |field, i| {
+    for (std.meta.fields(E)) |field| {
         fields = fields ++ &[_]StructField{.{
             .name = field.name,
             .field_type = Data,
@@ -144,7 +144,7 @@ pub fn directEnumArrayDefault(
 ) [directEnumArrayLen(E, max_unused_slots)]Data {
     const len = comptime directEnumArrayLen(E, max_unused_slots);
     var result: [len]Data = if (default) |d| [_]Data{d} ** len else undefined;
-    inline for (@typeInfo(@TypeOf(init_values)).Struct.fields) |f, i| {
+    inline for (@typeInfo(@TypeOf(init_values)).Struct.fields) |f| {
         const enum_value = @field(E, f.name);
         const index = @intCast(usize, @enumToInt(enum_value));
         result[index] = @field(init_values, f.name);
@@ -334,6 +334,7 @@ pub fn EnumArray(comptime E: type, comptime V: type) type {
 /// TODO: Once #8169 is fixed, consider switching this param
 /// back to an optional.
 pub fn NoExtension(comptime Self: type) type {
+    _ = Self;
     return NoExt;
 }
 const NoExt = struct {};
@@ -729,6 +730,7 @@ test "std.enums.ensureIndexer" {
 }
 
 fn ascByValue(ctx: void, comptime a: EnumField, comptime b: EnumField) bool {
+    _ = ctx;
     return a.value < b.value;
 }
 pub fn EnumIndexer(comptime E: type) type {
@@ -743,9 +745,11 @@ pub fn EnumIndexer(comptime E: type) type {
             pub const Key = E;
             pub const count: usize = 0;
             pub fn indexOf(e: E) usize {
+                _ = e;
                 unreachable;
             }
             pub fn keyForIndex(i: usize) E {
+                _ = i;
                 unreachable;
             }
         };
lib/std/fmt.zig
@@ -369,6 +369,7 @@ pub fn format(
 }
 
 pub fn formatAddress(value: anytype, options: FormatOptions, writer: anytype) @TypeOf(writer).Error!void {
+    _ = options;
     const T = @TypeOf(value);
 
     switch (@typeInfo(T)) {
@@ -553,7 +554,7 @@ pub fn formatType(
             .Many, .C => {
                 if (actual_fmt.len == 0)
                     @compileError("cannot format pointer without a specifier (i.e. {s} or {*})");
-                if (ptr_info.sentinel) |sentinel| {
+                if (ptr_info.sentinel) |_| {
                     return formatType(mem.span(value), actual_fmt, options, writer, max_depth);
                 }
                 if (ptr_info.child == u8) {
@@ -741,6 +742,8 @@ fn formatSliceHexImpl(comptime case: Case) type {
             options: std.fmt.FormatOptions,
             writer: anytype,
         ) !void {
+            _ = fmt;
+            _ = options;
             var buf: [2]u8 = undefined;
 
             for (bytes) |c| {
@@ -777,6 +780,8 @@ fn formatSliceEscapeImpl(comptime case: Case) type {
             options: std.fmt.FormatOptions,
             writer: anytype,
         ) !void {
+            _ = fmt;
+            _ = options;
             var buf: [4]u8 = undefined;
 
             buf[0] = '\\';
@@ -820,6 +825,7 @@ fn formatSizeImpl(comptime radix: comptime_int) type {
             options: FormatOptions,
             writer: anytype,
         ) !void {
+            _ = fmt;
             if (value == 0) {
                 return writer.writeAll("0B");
             }
@@ -903,6 +909,7 @@ pub fn formatAsciiChar(
     options: FormatOptions,
     writer: anytype,
 ) !void {
+    _ = options;
     return writer.writeAll(@as(*const [1]u8, &c));
 }
 
@@ -1362,6 +1369,8 @@ pub fn formatIntBuf(out_buf: []u8, value: anytype, base: u8, case: Case, options
 }
 
 fn formatDuration(ns: u64, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
+    _ = fmt;
+    _ = options;
     var ns_remaining = ns;
     inline for (.{
         .{ .ns = 365 * std.time.ns_per_day, .sep = 'y' },
@@ -2152,6 +2161,7 @@ test "custom" {
             options: FormatOptions,
             writer: anytype,
         ) !void {
+            _ = options;
             if (fmt.len == 0 or comptime std.mem.eql(u8, fmt, "p")) {
                 return std.fmt.format(writer, "({d:.3},{d:.3})", .{ self.x, self.y });
             } else if (comptime std.mem.eql(u8, fmt, "d")) {
@@ -2340,6 +2350,7 @@ test "formatType max_depth" {
             options: FormatOptions,
             writer: anytype,
         ) !void {
+            _ = options;
             if (fmt.len == 0) {
                 return std.fmt.format(writer, "({d:.3},{d:.3})", .{ self.x, self.y });
             } else {
lib/std/fs.zig
@@ -1541,7 +1541,7 @@ pub const Dir = struct {
         self: Dir,
         target_path: []const u8,
         sym_link_path: []const u8,
-        flags: SymLinkFlags,
+        _: SymLinkFlags,
     ) !void {
         return os.symlinkatWasi(target_path, self.fd, sym_link_path);
     }
@@ -1879,6 +1879,7 @@ pub const Dir = struct {
     /// * NtDll prefixed
     /// TODO currently this ignores `flags`.
     pub fn accessW(self: Dir, sub_path_w: [*:0]const u16, flags: File.OpenFlags) AccessError!void {
+        _ = flags;
         return os.faccessatW(self.fd, sub_path_w, 0, 0);
     }
 
lib/std/hash_map.zig
@@ -29,6 +29,7 @@ pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K)
 
     return struct {
         fn hash(ctx: Context, key: K) u64 {
+            _ = ctx;
             if (comptime trait.hasUniqueRepresentation(K)) {
                 return Wyhash.hash(0, std.mem.asBytes(&key));
             } else {
@@ -43,6 +44,7 @@ pub fn getAutoHashFn(comptime K: type, comptime Context: type) (fn (Context, K)
 pub fn getAutoEqlFn(comptime K: type, comptime Context: type) (fn (Context, K, K) bool) {
     return struct {
         fn eql(ctx: Context, a: K, b: K) bool {
+            _ = ctx;
             return meta.eql(a, b);
         }
     }.eql;
@@ -78,9 +80,11 @@ pub fn StringHashMapUnmanaged(comptime V: type) type {
 
 pub const StringContext = struct {
     pub fn hash(self: @This(), s: []const u8) u64 {
+        _ = self;
         return hashString(s);
     }
     pub fn eql(self: @This(), a: []const u8, b: []const u8) bool {
+        _ = self;
         return eqlString(a, b);
     }
 };
@@ -1887,9 +1891,11 @@ test "std.hash_map clone" {
 test "std.hash_map getOrPutAdapted" {
     const AdaptedContext = struct {
         fn eql(self: @This(), adapted_key: []const u8, test_key: u64) bool {
+            _ = self;
             return std.fmt.parseInt(u64, adapted_key, 10) catch unreachable == test_key;
         }
         fn hash(self: @This(), adapted_key: []const u8) u64 {
+            _ = self;
             const key = std.fmt.parseInt(u64, adapted_key, 10) catch unreachable;
             return (AutoContext(u64){}).hash(key);
         }
lib/std/heap.zig
@@ -108,6 +108,8 @@ const CAllocator = struct {
         len_align: u29,
         return_address: usize,
     ) error{OutOfMemory}![]u8 {
+        _ = allocator;
+        _ = return_address;
         assert(len > 0);
         assert(std.math.isPowerOfTwo(alignment));
 
@@ -134,6 +136,9 @@ const CAllocator = struct {
         len_align: u29,
         return_address: usize,
     ) Allocator.Error!usize {
+        _ = allocator;
+        _ = buf_align;
+        _ = return_address;
         if (new_len == 0) {
             alignedFree(buf.ptr);
             return 0;
@@ -178,6 +183,9 @@ fn rawCAlloc(
     len_align: u29,
     ret_addr: usize,
 ) Allocator.Error![]u8 {
+    _ = self;
+    _ = len_align;
+    _ = ret_addr;
     assert(ptr_align <= @alignOf(std.c.max_align_t));
     const ptr = @ptrCast([*]u8, c.malloc(len) orelse return error.OutOfMemory);
     return ptr[0..len];
@@ -191,6 +199,9 @@ fn rawCResize(
     len_align: u29,
     ret_addr: usize,
 ) Allocator.Error!usize {
+    _ = self;
+    _ = old_align;
+    _ = ret_addr;
     if (new_len == 0) {
         c.free(buf.ptr);
         return 0;
@@ -231,6 +242,8 @@ pub var next_mmap_addr_hint: ?[*]align(mem.page_size) u8 = null;
 
 const PageAllocator = struct {
     fn alloc(allocator: *Allocator, n: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
+        _ = allocator;
+        _ = ra;
         assert(n > 0);
         const aligned_len = mem.alignForward(n, mem.page_size);
 
@@ -334,6 +347,9 @@ const PageAllocator = struct {
         len_align: u29,
         return_address: usize,
     ) Allocator.Error!usize {
+        _ = allocator;
+        _ = buf_align;
+        _ = return_address;
         const new_size_aligned = mem.alignForward(new_size, mem.page_size);
 
         if (builtin.os.tag == .windows) {
@@ -482,6 +498,8 @@ const WasmPageAllocator = struct {
     }
 
     fn alloc(allocator: *Allocator, len: usize, alignment: u29, len_align: u29, ra: usize) error{OutOfMemory}![]u8 {
+        _ = allocator;
+        _ = ra;
         const page_count = nPages(len);
         const page_idx = try allocPages(page_count, alignment);
         return @intToPtr([*]u8, page_idx * mem.page_size)[0..alignPageAllocLen(page_count * mem.page_size, len, len_align)];
@@ -542,6 +560,9 @@ const WasmPageAllocator = struct {
         len_align: u29,
         return_address: usize,
     ) error{OutOfMemory}!usize {
+        _ = allocator;
+        _ = buf_align;
+        _ = return_address;
         const aligned_len = mem.alignForward(buf.len, mem.page_size);
         if (new_len > aligned_len) return error.OutOfMemory;
         const current_n = nPages(aligned_len);
@@ -588,6 +609,7 @@ pub const HeapAllocator = switch (builtin.os.tag) {
             len_align: u29,
             return_address: usize,
         ) error{OutOfMemory}![]u8 {
+            _ = return_address;
             const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
 
             const amt = n + ptr_align - 1 + @sizeOf(usize);
@@ -622,6 +644,8 @@ pub const HeapAllocator = switch (builtin.os.tag) {
             len_align: u29,
             return_address: usize,
         ) error{OutOfMemory}!usize {
+            _ = buf_align;
+            _ = return_address;
             const self = @fieldParentPtr(HeapAllocator, "allocator", allocator);
             if (new_size == 0) {
                 os.windows.HeapFree(self.heap_handle.?, 0, @intToPtr(*c_void, getRecordPtr(buf).*));
@@ -694,6 +718,8 @@ pub const FixedBufferAllocator = struct {
     }
 
     fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
+        _ = len_align;
+        _ = ra;
         const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
         const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse
             return error.OutOfMemory;
@@ -716,6 +742,8 @@ pub const FixedBufferAllocator = struct {
         len_align: u29,
         return_address: usize,
     ) Allocator.Error!usize {
+        _ = buf_align;
+        _ = return_address;
         const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
         assert(self.ownsSlice(buf)); // sanity check
 
@@ -766,6 +794,8 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
             }
 
             fn alloc(allocator: *Allocator, n: usize, ptr_align: u29, len_align: u29, ra: usize) ![]u8 {
+                _ = len_align;
+                _ = ra;
                 const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
                 var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
                 while (true) {
lib/std/io.zig
@@ -161,6 +161,7 @@ pub const null_writer = @as(NullWriter, .{ .context = {} });
 
 const NullWriter = Writer(void, error{}, dummyWrite);
 fn dummyWrite(context: void, data: []const u8) error{}!usize {
+    _ = context;
     return data.len;
 }
 
lib/std/json.zig
@@ -1221,11 +1221,11 @@ test "json.token premature object close" {
 pub fn validate(s: []const u8) bool {
     var p = StreamingParser.init();
 
-    for (s) |c, i| {
+    for (s) |c| {
         var token1: ?Token = undefined;
         var token2: ?Token = undefined;
 
-        p.feed(c, &token1, &token2) catch |err| {
+        p.feed(c, &token1, &token2) catch {
             return false;
         };
     }
@@ -1410,7 +1410,7 @@ fn parsedEqual(a: anytype, b: @TypeOf(a)) bool {
             if (a == null or b == null) return false;
             return parsedEqual(a.?, b.?);
         },
-        .Union => |unionInfo| {
+        .Union => {
             if (info.tag_type) |UnionTag| {
                 const tag_a = std.meta.activeTag(a);
                 const tag_b = std.meta.activeTag(b);
@@ -1771,7 +1771,7 @@ fn parseInternal(comptime T: type, token: Token, tokens: *TokenStream, options:
                             const source_slice = stringToken.slice(tokens.slice, tokens.i - 1);
                             switch (stringToken.escapes) {
                                 .None => return allocator.dupe(u8, source_slice),
-                                .Some => |some_escapes| {
+                                .Some => {
                                     const output = try allocator.alloc(u8, stringToken.decodedLength());
                                     errdefer allocator.free(output);
                                     try unescapeValidString(output, source_slice);
@@ -2391,7 +2391,7 @@ pub const Parser = struct {
         const slice = s.slice(input, i);
         switch (s.escapes) {
             .None => return Value{ .String = if (p.copy_strings) try allocator.dupe(u8, slice) else slice },
-            .Some => |some_escapes| {
+            .Some => {
                 const output = try allocator.alloc(u8, s.decodedLength());
                 errdefer allocator.free(output);
                 try unescapeValidString(output, slice);
@@ -2401,6 +2401,7 @@ pub const Parser = struct {
     }
 
     fn parseNumber(p: *Parser, n: std.meta.TagPayload(Token, Token.Number), input: []const u8, i: usize) !Value {
+        _ = p;
         return if (n.is_integer)
             Value{
                 .Integer = std.fmt.parseInt(i64, n.slice(input, i), 10) catch |e| switch (e) {
@@ -2815,7 +2816,7 @@ pub fn stringify(
             if (child_options.whitespace) |*child_whitespace| {
                 child_whitespace.indent_level += 1;
             }
-            inline for (S.fields) |Field, field_i| {
+            inline for (S.fields) |Field| {
                 // don't include void fields
                 if (Field.field_type == void) continue;
 
@@ -3114,6 +3115,7 @@ test "stringify struct with custom stringifier" {
             options: StringifyOptions,
             out_stream: anytype,
         ) !void {
+            _ = value;
             try out_stream.writeAll("[\"something special\",");
             try stringify(42, options, out_stream);
             try out_stream.writeByte(']');
lib/std/linked_list.zig
@@ -63,7 +63,7 @@ pub fn SinglyLinkedList(comptime T: type) type {
             pub fn countChildren(node: *const Node) usize {
                 var count: usize = 0;
                 var it: ?*const Node = node.next;
-                while (it) |n| : (it = n.next) {
+                while (it != null) : (it = it.?.next) {
                     count += 1;
                 }
                 return count;
lib/std/mem.zig
@@ -139,6 +139,11 @@ var failAllocator = Allocator{
     .resizeFn = Allocator.noResize,
 };
 fn failAllocatorAlloc(self: *Allocator, n: usize, alignment: u29, len_align: u29, ra: usize) Allocator.Error![]u8 {
+    _ = self;
+    _ = n;
+    _ = alignment;
+    _ = len_align;
+    _ = ra;
     return error.OutOfMemory;
 }
 
lib/std/meta.zig
@@ -843,6 +843,7 @@ pub const refAllDecls = @compileError("refAllDecls has been moved from std.meta
 pub fn declList(comptime Namespace: type, comptime Decl: type) []const *const Decl {
     const S = struct {
         fn declNameLessThan(context: void, lhs: *const Decl, rhs: *const Decl) bool {
+            _ = context;
             return mem.lessThan(u8, lhs.name, rhs.name);
         }
     };
lib/std/multi_array_list.zig
@@ -92,6 +92,7 @@ pub fn MultiArrayList(comptime S: type) type {
             }
             const Sort = struct {
                 fn lessThan(trash: *i32, lhs: Data, rhs: Data) bool {
+                    _ = trash;
                     return lhs.alignment > rhs.alignment;
                 }
             };
@@ -221,7 +222,7 @@ pub fn MultiArrayList(comptime S: type) type {
         /// retain list ordering.
         pub fn swapRemove(self: *Self, index: usize) void {
             const slices = self.slice();
-            inline for (fields) |field_info, i| {
+            inline for (fields) |_, i| {
                 const field_slice = slices.items(@intToEnum(Field, i));
                 field_slice[index] = field_slice[self.len - 1];
                 field_slice[self.len - 1] = undefined;
@@ -233,7 +234,7 @@ pub fn MultiArrayList(comptime S: type) type {
         /// after it to preserve order.
         pub fn orderedRemove(self: *Self, index: usize) void {
             const slices = self.slice();
-            inline for (fields) |field_info, field_index| {
+            inline for (fields) |_, field_index| {
                 const field_slice = slices.items(@intToEnum(Field, field_index));
                 var i = index;
                 while (i < self.len - 1) : (i += 1) {
lib/std/net.zig
@@ -270,6 +270,8 @@ pub const Ip4Address = extern struct {
         options: std.fmt.FormatOptions,
         out_stream: anytype,
     ) !void {
+        _ = fmt;
+        _ = options;
         const bytes = @ptrCast(*const [4]u8, &self.sa.addr);
         try std.fmt.format(out_stream, "{}.{}.{}.{}:{}", .{
             bytes[0],
@@ -281,6 +283,7 @@ pub const Ip4Address = extern struct {
     }
 
     pub fn getOsSockLen(self: Ip4Address) os.socklen_t {
+        _ = self;
         return @sizeOf(os.sockaddr_in);
     }
 };
@@ -556,6 +559,8 @@ pub const Ip6Address = extern struct {
         options: std.fmt.FormatOptions,
         out_stream: anytype,
     ) !void {
+        _ = fmt;
+        _ = options;
         const port = mem.bigToNative(u16, self.sa.port);
         if (mem.eql(u8, self.sa.addr[0..12], &[_]u8{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff })) {
             try std.fmt.format(out_stream, "[::ffff:{}.{}.{}.{}]:{}", .{
@@ -598,6 +603,7 @@ pub const Ip6Address = extern struct {
     }
 
     pub fn getOsSockLen(self: Ip6Address) os.socklen_t {
+        _ = self;
         return @sizeOf(os.sockaddr_in6);
     }
 };
@@ -1062,6 +1068,7 @@ fn IN6_IS_ADDR_SITELOCAL(a: [16]u8) bool {
 
 // Parameters `b` and `a` swapped to make this descending.
 fn addrCmpLessThan(context: void, b: LookupAddr, a: LookupAddr) bool {
+    _ = context;
     return a.sortkey < b.sortkey;
 }
 
lib/std/once.zig
@@ -61,6 +61,7 @@ test "Once executes its function just once" {
         for (threads) |*handle| {
             handle.* = try std.Thread.spawn(struct {
                 fn thread_fn(x: u8) void {
+                    _ = x;
                     global_once.call();
                 }
             }.thread_fn, 0);
lib/std/os.zig
@@ -1164,6 +1164,7 @@ fn openOptionsFromFlags(flags: u32) windows.OpenFileOptions {
 /// TODO currently, this function does not handle all flag combinations
 /// or make use of the `perm` argument.
 pub fn openW(file_path_w: []const u16, flags: u32, perm: mode_t) OpenError!fd_t {
+    _ = perm;
     var options = openOptionsFromFlags(flags);
     options.dir = std.fs.cwd().fd;
     return windows.OpenFile(file_path_w, options) catch |err| switch (err) {
@@ -1273,6 +1274,7 @@ pub fn openatZ(dir_fd: fd_t, file_path: [*:0]const u8, flags: u32, mode: mode_t)
 /// TODO currently, this function does not handle all flag combinations
 /// or make use of the `mode` argument.
 pub fn openatW(dir_fd: fd_t, file_path_w: []const u16, flags: u32, mode: mode_t) OpenError!fd_t {
+    _ = mode;
     var options = openOptionsFromFlags(flags);
     options.dir = dir_fd;
     return windows.OpenFile(file_path_w, options) catch |err| switch (err) {
@@ -2169,6 +2171,7 @@ pub fn mkdirat(dir_fd: fd_t, sub_dir_path: []const u8, mode: u32) MakeDirError!v
 pub const mkdiratC = @compileError("deprecated: renamed to mkdiratZ");
 
 pub fn mkdiratWasi(dir_fd: fd_t, sub_dir_path: []const u8, mode: u32) MakeDirError!void {
+    _ = mode;
     switch (wasi.path_create_directory(dir_fd, sub_dir_path.ptr, sub_dir_path.len)) {
         wasi.ESUCCESS => return,
         wasi.EACCES => return error.AccessDenied,
@@ -2216,6 +2219,7 @@ pub fn mkdiratZ(dir_fd: fd_t, sub_dir_path: [*:0]const u8, mode: u32) MakeDirErr
 }
 
 pub fn mkdiratW(dir_fd: fd_t, sub_path_w: []const u16, mode: u32) MakeDirError!void {
+    _ = mode;
     const sub_dir_handle = windows.OpenFile(sub_path_w, .{
         .dir = dir_fd,
         .access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE,
@@ -2291,6 +2295,7 @@ pub fn mkdirZ(dir_path: [*:0]const u8, mode: u32) MakeDirError!void {
 
 /// Windows-only. Same as `mkdir` but the path parameter is WTF-16 encoded.
 pub fn mkdirW(dir_path_w: []const u16, mode: u32) MakeDirError!void {
+    _ = mode;
     const sub_dir_handle = windows.OpenFile(dir_path_w, .{
         .dir = std.fs.cwd().fd,
         .access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE,
@@ -3868,6 +3873,7 @@ pub fn accessZ(path: [*:0]const u8, mode: u32) AccessError!void {
 /// Otherwise use `access` or `accessC`.
 /// TODO currently this ignores `mode`.
 pub fn accessW(path: [*:0]const u16, mode: u32) windows.GetFileAttributesError!void {
+    _ = mode;
     const ret = try windows.GetFileAttributesW(path);
     if (ret != windows.INVALID_FILE_ATTRIBUTES) {
         return;
@@ -3918,6 +3924,8 @@ pub fn faccessatZ(dirfd: fd_t, path: [*:0]const u8, mode: u32, flags: u32) Acces
 /// is NtDll-prefixed, null-terminated, WTF-16 encoded.
 /// TODO currently this ignores `mode` and `flags`
 pub fn faccessatW(dirfd: fd_t, sub_path_w: [*:0]const u16, mode: u32, flags: u32) AccessError!void {
+    _ = mode;
+    _ = flags;
     if (sub_path_w[0] == '.' and sub_path_w[1] == 0) {
         return;
     }
@@ -4895,6 +4903,8 @@ pub fn res_mkquery(
     newrr: ?[*]const u8,
     buf: []u8,
 ) usize {
+    _ = data;
+    _ = newrr;
     // This implementation is ported from musl libc.
     // A more idiomatic "ziggy" implementation would be welcome.
     var name = dname;
lib/std/packed_int_array.zig
@@ -194,6 +194,7 @@ pub fn PackedIntArrayEndian(comptime Int: type, comptime endian: Endian, comptim
 
         /// Returns the number of elements in the packed array
         pub fn len(self: Self) usize {
+            _ = self;
             return int_count;
         }
 
lib/std/pdb.zig
@@ -675,6 +675,7 @@ pub const Pdb = struct {
     }
 
     pub fn getSymbolName(self: *Pdb, module: *Module, address: u64) ?[]const u8 {
+        _ = self;
         std.debug.assert(module.populated);
 
         var symbol_i: usize = 0;
@@ -906,7 +907,7 @@ const Msf = struct {
         // These streams are not used, but still participate in the file
         // and must be taken into account when resolving stream indices.
         const Nil = 0xFFFFFFFF;
-        for (stream_sizes) |*s, i| {
+        for (stream_sizes) |*s| {
             const size = try directory.reader().readIntLittle(u32);
             s.* = if (size == Nil) 0 else blockCountFromSize(size, superblock.BlockSize);
         }
lib/std/priority_dequeue.zig
@@ -428,7 +428,7 @@ pub fn PriorityDequeue(comptime T: type) type {
                 warn("{}, ", .{e});
             }
             warn("array: ", .{});
-            for (self.items) |e, i| {
+            for (self.items) |e| {
                 warn("{}, ", .{e});
             }
             warn("len: {} ", .{self.len});
lib/std/priority_queue.zig
@@ -249,7 +249,7 @@ pub fn PriorityQueue(comptime T: type) type {
                 warn("{}, ", .{e});
             }
             warn("array: ", .{});
-            for (self.items) |e, i| {
+            for (self.items) |e| {
                 warn("{}, ", .{e});
             }
             warn("len: {} ", .{self.len});
lib/std/process.zig
@@ -419,6 +419,7 @@ pub const ArgIteratorWindows = struct {
         };
     }
     fn emitBackslashes(self: *ArgIteratorWindows, buf: *std.ArrayList(u16), emit_count: usize) !void {
+        _ = self;
         var i: usize = 0;
         while (i < emit_count) : (i += 1) {
             try buf.append(std.mem.nativeToLittle(u16, '\\'));
@@ -748,6 +749,7 @@ pub fn getSelfExeSharedLibPaths(allocator: *Allocator) error{OutOfMemory}![][:0]
             }
             try os.dl_iterate_phdr(&paths, error{OutOfMemory}, struct {
                 fn callback(info: *os.dl_phdr_info, size: usize, list: *List) !void {
+                    _ = size;
                     const name = info.dlpi_name orelse return;
                     if (name[0] == '/') {
                         const item = try list.allocator.dupeZ(u8, mem.spanZ(name));
lib/std/Progress.zig
@@ -295,7 +295,7 @@ fn refreshWithHeldLock(self: *Progress) void {
         end += 1;
     }
 
-    _ = file.write(self.output_buffer[0..end]) catch |e| {
+    _ = file.write(self.output_buffer[0..end]) catch {
         // Stop trying to write to this file once it errors.
         self.terminal = null;
     };
lib/std/SemanticVersion.zig
@@ -162,6 +162,7 @@ pub fn format(
     options: std.fmt.FormatOptions,
     out_stream: anytype,
 ) !void {
+    _ = options;
     if (fmt.len != 0) @compileError("Unknown format string: '" ++ fmt ++ "'");
     try std.fmt.format(out_stream, "{d}.{d}.{d}", .{ self.major, self.minor, self.patch });
     if (self.pre) |pre| try std.fmt.format(out_stream, "-{s}", .{pre});
@@ -259,7 +260,7 @@ test "SemanticVersion format" {
 
     // Invalid version string that may overflow.
     const big_invalid = "99999999999999999999999.999999999999999999.99999999999999999----RC-SNAPSHOT.12.09.1--------------------------------..12";
-    if (parse(big_invalid)) |ver| std.debug.panic("expected error, found {}", .{ver}) else |err| {}
+    if (parse(big_invalid)) |ver| std.debug.panic("expected error, found {}", .{ver}) else |_| {}
 }
 
 test "SemanticVersion precedence" {
lib/std/sort.zig
@@ -37,9 +37,11 @@ pub fn binarySearch(
 test "binarySearch" {
     const S = struct {
         fn order_u32(context: void, lhs: u32, rhs: u32) math.Order {
+            _ = context;
             return math.order(lhs, rhs);
         }
         fn order_i32(context: void, lhs: i32, rhs: i32) math.Order {
+            _ = context;
             return math.order(lhs, rhs);
         }
     };
@@ -1133,6 +1135,7 @@ fn swap(
 pub fn asc(comptime T: type) fn (void, T, T) bool {
     const impl = struct {
         fn inner(context: void, a: T, b: T) bool {
+            _ = context;
             return a < b;
         }
     };
@@ -1144,6 +1147,7 @@ pub fn asc(comptime T: type) fn (void, T, T) bool {
 pub fn desc(comptime T: type) fn (void, T, T) bool {
     const impl = struct {
         fn inner(context: void, a: T, b: T) bool {
+            _ = context;
             return a > b;
         }
     };
lib/std/target.zig
@@ -157,7 +157,7 @@ pub const Target = struct {
             pub fn format(
                 self: WindowsVersion,
                 comptime fmt: []const u8,
-                options: std.fmt.FormatOptions,
+                _: std.fmt.FormatOptions,
                 out_stream: anytype,
             ) !void {
                 if (fmt.len > 0 and fmt[0] == 's') {
lib/std/unicode.zig
@@ -210,7 +210,7 @@ pub fn utf8ValidateSlice(s: []const u8) bool {
                 return false;
             }
             i += cp_len;
-        } else |err| {
+        } else |_| {
             return false;
         }
     }
src/codegen/arm.zig
@@ -674,7 +674,7 @@ pub const Instruction = union(enum) {
         };
         const imm4h: u4 = switch (offset) {
             .immediate => |imm| @truncate(u4, imm >> 4),
-            .register => |reg| 0b0000,
+            .register => 0b0000,
         };
 
         return Instruction{
src/codegen/c.zig
@@ -47,6 +47,8 @@ fn formatTypeAsCIdentifier(
     options: std.fmt.FormatOptions,
     writer: anytype,
 ) !void {
+    _ = fmt;
+    _ = options;
     var buffer = [1]u8{0} ** 128;
     // We don't care if it gets cut off; it's still more unique than a number
     var buf = std.fmt.bufPrint(&buffer, "{}", .{data}) catch &buffer;
@@ -63,6 +65,8 @@ fn formatIdent(
     options: std.fmt.FormatOptions,
     writer: anytype,
 ) !void {
+    _ = fmt;
+    _ = options;
     for (ident) |c, i| {
         switch (c) {
             'a'...'z', 'A'...'Z', '_' => try writer.writeByte(c),
@@ -747,6 +751,7 @@ pub fn genBody(o: *Object, body: ir.Body) error{ AnalysisFail, OutOfMemory }!voi
 }
 
 fn genVarPtr(o: *Object, inst: *Inst.VarPtr) !CValue {
+    _ = o;
     return CValue{ .decl_ref = inst.variable.owner_decl };
 }
 
@@ -937,6 +942,8 @@ fn genCall(o: *Object, inst: *Inst.Call) !CValue {
 }
 
 fn genDbgStmt(o: *Object, inst: *Inst.DbgStmt) !CValue {
+    _ = o;
+    _ = inst;
     // TODO emit #line directive here with line number and filename
     return CValue.none;
 }
@@ -1016,11 +1023,13 @@ fn genBitcast(o: *Object, inst: *Inst.UnOp) !CValue {
 }
 
 fn genBreakpoint(o: *Object, inst: *Inst.NoOp) !CValue {
+    _ = inst;
     try o.writer().writeAll("zig_breakpoint();\n");
     return CValue.none;
 }
 
 fn genUnreach(o: *Object, inst: *Inst.NoOp) !CValue {
+    _ = inst;
     try o.writer().writeAll("zig_unreachable();\n");
     return CValue.none;
 }
src/codegen/llvm.zig
@@ -154,6 +154,7 @@ pub const Object = struct {
     object_pathZ: [:0]const u8,
 
     pub fn create(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*Object {
+        _ = sub_path;
         const self = try allocator.create(Object);
         errdefer allocator.destroy(self);
 
@@ -742,6 +743,7 @@ pub const FuncGen = struct {
     }
 
     fn genRetVoid(self: *FuncGen, inst: *Inst.NoOp) ?*const llvm.Value {
+        _ = inst;
         _ = self.builder.buildRetVoid();
         return null;
     }
@@ -873,6 +875,7 @@ pub const FuncGen = struct {
     }
 
     fn genUnreach(self: *FuncGen, inst: *Inst.NoOp) ?*const llvm.Value {
+        _ = inst;
         _ = self.builder.buildUnreachable();
         return null;
     }
@@ -1013,6 +1016,7 @@ pub const FuncGen = struct {
     }
 
     fn genBreakpoint(self: *FuncGen, inst: *Inst.NoOp) !?*const llvm.Value {
+        _ = inst;
         const llvn_fn = self.getIntrinsic("llvm.debugtrap");
         _ = self.builder.buildCall(llvn_fn, null, 0, "");
         return null;
src/codegen/wasm.zig
@@ -702,7 +702,7 @@ pub const Context = struct {
                 try writer.writeByte(wasm.valtype(.i32)); // error code is always an i32 integer.
                 try writer.writeByte(val_type);
             },
-            else => |ret_type| {
+            else => {
                 try leb.writeULEB128(writer, @as(u32, 1));
                 // Can we maybe get the source index of the return type?
                 const val_type = try self.genValtype(.{ .node_offset = 0 }, return_type);
@@ -721,7 +721,7 @@ pub const Context = struct {
                 // TODO: check for and handle death of instructions
                 const mod_fn = blk: {
                     if (typed_value.val.castTag(.function)) |func| break :blk func.data;
-                    if (typed_value.val.castTag(.extern_fn)) |ext_fn| return Result.appended; // don't need code body for extern functions
+                    if (typed_value.val.castTag(.extern_fn)) |_| return Result.appended; // don't need code body for extern functions
                     unreachable;
                 };
 
@@ -910,7 +910,7 @@ pub const Context = struct {
                 },
                 else => unreachable,
             },
-            .local => |local| {
+            .local => {
                 try self.emitWValue(rhs);
                 try writer.writeByte(wasm.opcode(.local_set));
                 try leb.writeULEB128(writer, lhs.local);
@@ -925,6 +925,7 @@ pub const Context = struct {
     }
 
     fn genArg(self: *Context, inst: *Inst.Arg) InnerError!WValue {
+        _ = inst;
         // arguments share the index with locals
         defer self.local_index += 1;
         return WValue{ .local = self.local_index };
@@ -1213,12 +1214,15 @@ pub const Context = struct {
     }
 
     fn genBreakpoint(self: *Context, breakpoint: *Inst.NoOp) InnerError!WValue {
+        _ = self;
+        _ = breakpoint;
         // unsupported by wasm itself. Can be implemented once we support DWARF
         // for wasm
         return .none;
     }
 
     fn genUnreachable(self: *Context, unreach: *Inst.NoOp) InnerError!WValue {
+        _ = unreach;
         try self.code.append(wasm.opcode(.@"unreachable"));
         return .none;
     }
src/link/MachO/bind.zig
@@ -10,6 +10,7 @@ pub const Pointer = struct {
 };
 
 pub fn pointerCmp(context: void, a: Pointer, b: Pointer) bool {
+    _ = context;
     if (a.segment_id < b.segment_id) return true;
     if (a.segment_id == b.segment_id) {
         return a.offset < b.offset;
src/link/MachO/DebugSymbols.zig
@@ -899,6 +899,7 @@ fn writeStringTable(self: *DebugSymbols) !void {
 }
 
 pub fn updateDeclLineNumber(self: *DebugSymbols, module: *Module, decl: *const Module.Decl) !void {
+    _ = module;
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -926,6 +927,8 @@ pub fn initDeclDebugBuffers(
     module: *Module,
     decl: *Module.Decl,
 ) !DeclDebugBuffers {
+    _ = self;
+    _ = module;
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -1188,6 +1191,7 @@ fn addDbgInfoType(
     dbg_info_buffer: *std.ArrayList(u8),
     target: std.Target,
 ) !void {
+    _ = self;
     switch (ty.zigTypeTag()) {
         .Void => unreachable,
         .NoReturn => unreachable,
@@ -1364,6 +1368,7 @@ fn getRelocDbgInfoSubprogramHighPC() u32 {
 }
 
 fn dbgLineNeededHeaderBytes(self: DebugSymbols, module: *Module) u32 {
+    _ = self;
     const directory_entry_format_count = 1;
     const file_name_entry_format_count = 1;
     const directory_count = 1;
@@ -1378,6 +1383,7 @@ fn dbgLineNeededHeaderBytes(self: DebugSymbols, module: *Module) u32 {
 }
 
 fn dbgInfoNeededHeaderBytes(self: DebugSymbols) u32 {
+    _ = self;
     return 120;
 }
 
src/link/MachO/Zld.zig
@@ -108,6 +108,7 @@ const TlvOffset = struct {
     offset: u64,
 
     fn cmp(context: void, a: TlvOffset, b: TlvOffset) bool {
+        _ = context;
         return a.source_addr < b.source_addr;
     }
 };
@@ -437,7 +438,7 @@ fn updateMetadata(self: *Zld) !void {
         const data_seg = &self.load_commands.items[self.data_segment_cmd_index.?].Segment;
 
         // Create missing metadata
-        for (object.sections.items) |sect, sect_id| {
+        for (object.sections.items) |sect| {
             const segname = sect.segname();
             const sectname = sect.sectname();
 
@@ -1373,7 +1374,7 @@ fn allocateTentativeSymbols(self: *Zld) !void {
     }
 
     // Convert tentative definitions into regular symbols.
-    for (self.tentatives.values()) |sym, i| {
+    for (self.tentatives.values()) |sym| {
         const tent = sym.cast(Symbol.Tentative) orelse unreachable;
         const reg = try self.allocator.create(Symbol.Regular);
         errdefer self.allocator.destroy(reg);
@@ -1758,7 +1759,7 @@ fn resolveSymbolsInObject(self: *Zld, object: *Object) !void {
 
             t_sym.alias = sym;
             sym_ptr.* = sym;
-        } else if (sym.cast(Symbol.Unresolved)) |und| {
+        } else if (sym.cast(Symbol.Unresolved)) |_| {
             if (self.globals.get(sym.name)) |g_sym| {
                 sym.alias = g_sym;
                 continue;
src/link/C.zig
@@ -76,7 +76,12 @@ pub fn deinit(self: *C) void {
     self.decl_table.deinit(self.base.allocator);
 }
 
-pub fn allocateDeclIndexes(self: *C, decl: *Module.Decl) !void {}
+pub fn allocateDeclIndexes(self: *C, decl: *Module.Decl) !void {
+    _ = self;
+    _ = decl;
+}
 
 pub fn freeDecl(self: *C, decl: *Module.Decl) void {
     _ = self.decl_table.swapRemove(decl);
@@ -307,4 +312,11 @@ pub fn updateDeclExports(
     module: *Module,
     decl: *Module.Decl,
     exports: []const *Module.Export,
-) !void {}
+) !void {
+    _ = self;
+    _ = module;
+    _ = decl;
+    _ = exports;
+}
src/link/Coff.zig
@@ -831,7 +831,7 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
                 .target = self.base.options.target,
                 .output_mode = .Obj,
             });
-            const o_directory = self.base.options.module.?.zig_cache_artifact_directory;
+            const o_directory = module.zig_cache_artifact_directory;
             const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
             break :blk full_obj_path;
         }
@@ -1340,6 +1340,9 @@ pub fn getDeclVAddr(self: *Coff, decl: *const Module.Decl) u64 {
 }
 
 pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl: *Module.Decl) !void {
+    _ = self;
+    _ = module;
+    _ = decl;
     // TODO Implement this
 }
 
src/link/Elf.zig
@@ -1262,7 +1262,7 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
                 .target = self.base.options.target,
                 .output_mode = .Obj,
             });
-            const o_directory = self.base.options.module.?.zig_cache_artifact_directory;
+            const o_directory = module.zig_cache_artifact_directory;
             const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
             break :blk full_obj_path;
         }
@@ -1938,6 +1938,11 @@ fn freeTextBlock(self: *Elf, text_block: *TextBlock) void {
 }
 
 fn shrinkTextBlock(self: *Elf, text_block: *TextBlock, new_block_size: u64) void {
+    _ = self;
+    _ = text_block;
+    _ = new_block_size;
     // TODO check the new capacity, and if it crosses the size threshold into a big enough
     // capacity, insert a free list node for it.
 }
@@ -2706,6 +2711,7 @@ pub fn updateDeclExports(
 
 /// Must be called only after a successful call to `updateDecl`.
 pub fn updateDeclLineNumber(self: *Elf, module: *Module, decl: *const Module.Decl) !void {
+    _ = module;
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -2979,6 +2985,7 @@ fn dbgLineNeededHeaderBytes(self: Elf) u32 {
 }
 
 fn dbgInfoNeededHeaderBytes(self: Elf) u32 {
+    _ = self;
     return 120;
 }
 
@@ -3372,7 +3379,7 @@ const CsuObjects = struct {
                     if (result.crtend) |*obj| obj.* = try fs.path.join(arena, &[_][]const u8{ gcc_dir_path, obj.* });
                 },
                 else => {
-                    inline for (std.meta.fields(@TypeOf(result))) |f, i| {
+                    inline for (std.meta.fields(@TypeOf(result))) |f| {
                         if (@field(result, f.name)) |*obj| {
                             obj.* = try fs.path.join(arena, &[_][]const u8{ crt_dir_path, obj.* });
                         }
@@ -3380,7 +3387,7 @@ const CsuObjects = struct {
                 },
             }
         } else {
-            inline for (std.meta.fields(@TypeOf(result))) |f, i| {
+            inline for (std.meta.fields(@TypeOf(result))) |f| {
                 if (@field(result, f.name)) |*obj| {
                     if (comp.crt_files.get(obj.*)) |crtf| {
                         obj.* = crtf.full_object_path;
src/link/MachO.zig
@@ -441,6 +441,7 @@ pub fn flush(self: *MachO, comp: *Compilation) !void {
 }
 
 pub fn flushModule(self: *MachO, comp: *Compilation) !void {
+    _ = comp;
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -533,7 +534,7 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
                 .target = self.base.options.target,
                 .output_mode = .Obj,
             });
-            const o_directory = self.base.options.module.?.zig_cache_artifact_directory;
+            const o_directory = module.zig_cache_artifact_directory;
             const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
             break :blk full_obj_path;
         }
@@ -1254,6 +1255,9 @@ fn freeTextBlock(self: *MachO, text_block: *TextBlock) void {
 }
 
 fn shrinkTextBlock(self: *MachO, text_block: *TextBlock, new_block_size: u64) void {
+    _ = self;
+    _ = text_block;
+    _ = new_block_size;
     // TODO check the new capacity, and if it crosses the size threshold into a big enough
     // capacity, insert a free list node for it.
 }
src/link/SpirV.zig
@@ -102,6 +102,7 @@ pub fn deinit(self: *SpirV) void {
 }
 
 pub fn updateDecl(self: *SpirV, module: *Module, decl: *Module.Decl) !void {
+    _ = module;
     // Keep track of all decls so we can iterate over them on flush().
     _ = try self.decl_table.getOrPut(self.base.allocator, decl);
 }
@@ -111,7 +112,14 @@ pub fn updateDeclExports(
     module: *Module,
     decl: *const Module.Decl,
     exports: []const *Module.Export,
-) !void {}
+) !void {
+    _ = self;
+    _ = module;
+    _ = decl;
+    _ = exports;
+}
 
 pub fn freeDecl(self: *SpirV, decl: *Module.Decl) void {
     assert(self.decl_table.swapRemove(decl));
src/link/Wasm.zig
@@ -216,7 +216,7 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void {
             try module.failed_decls.put(module.gpa, decl, context.err_msg);
             return;
         },
-        else => |e| return err,
+        else => |e| return e,
     };
 
     const code: []const u8 = switch (result) {
@@ -258,7 +258,14 @@ pub fn updateDeclExports(
     module: *Module,
     decl: *const Module.Decl,
     exports: []const *Module.Export,
-) !void {}
+) !void {
+    _ = self;
+    _ = module;
+    _ = decl;
+    _ = exports;
+}
 
 pub fn freeDecl(self: *Wasm, decl: *Module.Decl) void {
     if (self.getFuncidx(decl)) |func_idx| {
@@ -300,6 +307,7 @@ pub fn flush(self: *Wasm, comp: *Compilation) !void {
 }
 
 pub fn flushModule(self: *Wasm, comp: *Compilation) !void {
+    _ = comp;
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -557,7 +565,7 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
                 .target = self.base.options.target,
                 .output_mode = .Obj,
             });
-            const o_directory = self.base.options.module.?.zig_cache_artifact_directory;
+            const o_directory = module.zig_cache_artifact_directory;
             const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
             break :blk full_obj_path;
         }
src/air.zig
@@ -304,9 +304,12 @@ pub const Inst = struct {
         base: Inst,
 
         pub fn operandCount(self: *const NoOp) usize {
+            _ = self;
             return 0;
         }
         pub fn getOperand(self: *const NoOp, index: usize) ?*Inst {
+            _ = self;
+            _ = index;
             return null;
         }
     };
@@ -316,6 +319,7 @@ pub const Inst = struct {
         operand: *Inst,
 
         pub fn operandCount(self: *const UnOp) usize {
+            _ = self;
             return 1;
         }
         pub fn getOperand(self: *const UnOp, index: usize) ?*Inst {
@@ -331,6 +335,7 @@ pub const Inst = struct {
         rhs: *Inst,
 
         pub fn operandCount(self: *const BinOp) usize {
+            _ = self;
             return 2;
         }
         pub fn getOperand(self: *const BinOp, index: usize) ?*Inst {
@@ -356,9 +361,12 @@ pub const Inst = struct {
         name: [*:0]const u8,
 
         pub fn operandCount(self: *const Arg) usize {
+            _ = self;
             return 0;
         }
         pub fn getOperand(self: *const Arg, index: usize) ?*Inst {
+            _ = self;
+            _ = index;
             return null;
         }
     };
@@ -391,9 +399,12 @@ pub const Inst = struct {
         body: Body,
 
         pub fn operandCount(self: *const Block) usize {
+            _ = self;
             return 0;
         }
         pub fn getOperand(self: *const Block, index: usize) ?*Inst {
+            _ = self;
+            _ = index;
             return null;
         }
     };
@@ -412,9 +423,12 @@ pub const Inst = struct {
         body: Body,
 
         pub fn operandCount(self: *const BrBlockFlat) usize {
+            _ = self;
             return 0;
         }
         pub fn getOperand(self: *const BrBlockFlat, index: usize) ?*Inst {
+            _ = self;
+            _ = index;
             return null;
         }
     };
@@ -427,9 +441,11 @@ pub const Inst = struct {
         operand: *Inst,
 
         pub fn operandCount(self: *const Br) usize {
+            _ = self;
             return 1;
         }
         pub fn getOperand(self: *const Br, index: usize) ?*Inst {
             if (index == 0)
                 return self.operand;
             return null;
@@ -443,9 +459,12 @@ pub const Inst = struct {
         block: *Block,
 
         pub fn operandCount(self: *const BrVoid) usize {
+            _ = self;
             return 0;
         }
         pub fn getOperand(self: *const BrVoid, index: usize) ?*Inst {
+            _ = self;
+            _ = index;
             return null;
         }
     };
@@ -490,6 +509,7 @@ pub const Inst = struct {
         else_death_count: u32 = 0,
 
         pub fn operandCount(self: *const CondBr) usize {
+            _ = self;
             return 1;
         }
         pub fn getOperand(self: *const CondBr, index: usize) ?*Inst {
@@ -516,9 +536,12 @@ pub const Inst = struct {
         val: Value,
 
         pub fn operandCount(self: *const Constant) usize {
+            _ = self;
             return 0;
         }
         pub fn getOperand(self: *const Constant, index: usize) ?*Inst {
+            _ = self;
+            _ = index;
             return null;
         }
     };
@@ -530,9 +553,12 @@ pub const Inst = struct {
         body: Body,
 
         pub fn operandCount(self: *const Loop) usize {
+            _ = self;
             return 0;
         }
         pub fn getOperand(self: *const Loop, index: usize) ?*Inst {
+            _ = self;
+            _ = index;
             return null;
         }
     };
@@ -544,9 +570,12 @@ pub const Inst = struct {
         variable: *Module.Var,
 
         pub fn operandCount(self: *const VarPtr) usize {
+            _ = self;
             return 0;
         }
         pub fn getOperand(self: *const VarPtr, index: usize) ?*Inst {
+            _ = self;
+            _ = index;
             return null;
         }
     };
@@ -559,9 +588,12 @@ pub const Inst = struct {
         field_index: usize,
 
         pub fn operandCount(self: *const StructFieldPtr) usize {
+            _ = self;
             return 1;
         }
         pub fn getOperand(self: *const StructFieldPtr, index: usize) ?*Inst {
             var i = index;
 
             if (i < 1)
@@ -593,6 +625,7 @@ pub const Inst = struct {
         };
 
         pub fn operandCount(self: *const SwitchBr) usize {
+            _ = self;
             return 1;
         }
         pub fn getOperand(self: *const SwitchBr, index: usize) ?*Inst {
@@ -621,9 +654,12 @@ pub const Inst = struct {
         column: u32,
 
         pub fn operandCount(self: *const DbgStmt) usize {
+            _ = self;
             return 0;
         }
         pub fn getOperand(self: *const DbgStmt, index: usize) ?*Inst {
+            _ = self;
+            _ = index;
             return null;
         }
     };
src/AstGen.zig
@@ -925,6 +925,7 @@ fn suspendExpr(
     rl: ResultLoc,
     node: ast.Node.Index,
 ) InnerError!Zir.Inst.Ref {
+    _ = rl;
     const astgen = gz.astgen;
     const gpa = astgen.gpa;
     const tree = astgen.tree;
@@ -1208,6 +1209,7 @@ fn arrayInitExprRlNone(
     elements: []const ast.Node.Index,
     tag: Zir.Inst.Tag,
 ) InnerError!Zir.Inst.Ref {
+    _ = rl;
     const astgen = gz.astgen;
     const gpa = astgen.gpa;
     const elem_list = try gpa.alloc(Zir.Inst.Ref, elements.len);
@@ -1233,6 +1235,9 @@ fn arrayInitExprRlTy(
     elem_ty_inst: Zir.Inst.Ref,
     tag: Zir.Inst.Tag,
 ) InnerError!Zir.Inst.Ref {
+    _ = rl;
+    _ = array_ty_inst;
+    _ = elem_ty_inst;
     const astgen = gz.astgen;
     const gpa = astgen.gpa;
 
@@ -1259,6 +1264,7 @@ fn arrayInitExprRlPtr(
     elements: []const ast.Node.Index,
     result_ptr: Zir.Inst.Ref,
 ) InnerError!Zir.Inst.Ref {
+    _ = rl;
     const astgen = gz.astgen;
     const gpa = astgen.gpa;
 
@@ -1368,6 +1374,7 @@ fn structInitExprRlNone(
     struct_init: ast.full.StructInit,
     tag: Zir.Inst.Tag,
 ) InnerError!Zir.Inst.Ref {
+    _ = rl;
     const astgen = gz.astgen;
     const gpa = astgen.gpa;
     const tree = astgen.tree;
@@ -1403,6 +1410,7 @@ fn structInitExprRlPtr(
     struct_init: ast.full.StructInit,
     result_ptr: Zir.Inst.Ref,
 ) InnerError!Zir.Inst.Ref {
+    _ = rl;
     const astgen = gz.astgen;
     const gpa = astgen.gpa;
     const tree = astgen.tree;
@@ -1439,6 +1447,7 @@ fn structInitExprRlTy(
     ty_inst: Zir.Inst.Ref,
     tag: Zir.Inst.Tag,
 ) InnerError!Zir.Inst.Ref {
+    _ = rl;
     const astgen = gz.astgen;
     const gpa = astgen.gpa;
     const tree = astgen.tree;
@@ -1781,6 +1790,7 @@ fn blockExprStmts(
     node: ast.Node.Index,
     statements: []const ast.Node.Index,
 ) !void {
+    _ = node;
     const astgen = gz.astgen;
     const tree = astgen.tree;
     const node_tags = tree.nodes.items(.tag);
@@ -2117,6 +2127,7 @@ fn genDefers(
     inner_scope: *Scope,
     err_code: Zir.Inst.Ref,
 ) InnerError!void {
+    _ = err_code;
     const astgen = gz.astgen;
     const tree = astgen.tree;
     const node_datas = tree.nodes.items(.data);
@@ -2201,6 +2212,7 @@ fn deferStmt(
     block_arena: *Allocator,
     scope_tag: Scope.Tag,
 ) InnerError!*Scope {
+    _ = gz;
     const defer_scope = try block_arena.create(Scope.Defer);
     defer_scope.* = .{
         .base = .{ .tag = scope_tag },
@@ -4703,6 +4715,8 @@ fn finishThenElseBlock(
     then_break_block: Zir.Inst.Index,
     break_tag: Zir.Inst.Tag,
 ) InnerError!Zir.Inst.Ref {
+    _ = then_src;
+    _ = else_src;
     // We now have enough information to decide whether the result instruction should
     // be communicated via result location pointer or break instructions.
     const strat = rl.strategy(block_scope);
@@ -4886,7 +4900,7 @@ fn ifExpr(
         inst: Zir.Inst.Ref,
         bool_bit: Zir.Inst.Ref,
     } = c: {
-        if (if_full.error_token) |error_token| {
+        if (if_full.error_token) |_| {
             const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none;
             const err_union = try expr(&block_scope, &block_scope.base, cond_rl, if_full.ast.cond_expr);
             const tag: Zir.Inst.Tag = if (payload_is_ref) .is_err_ptr else .is_err;
@@ -4894,7 +4908,7 @@ fn ifExpr(
                 .inst = err_union,
                 .bool_bit = try block_scope.addUnNode(tag, err_union, node),
             };
-        } else if (if_full.payload_token) |payload_token| {
+        } else if (if_full.payload_token) |_| {
             const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none;
             const optional = try expr(&block_scope, &block_scope.base, cond_rl, if_full.ast.cond_expr);
             const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_null_ptr else .is_non_null;
@@ -5146,7 +5160,7 @@ fn whileExpr(
         inst: Zir.Inst.Ref,
         bool_bit: Zir.Inst.Ref,
     } = c: {
-        if (while_full.error_token) |error_token| {
+        if (while_full.error_token) |_| {
             const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none;
             const err_union = try expr(&continue_scope, &continue_scope.base, cond_rl, while_full.ast.cond_expr);
             const tag: Zir.Inst.Tag = if (payload_is_ref) .is_err_ptr else .is_err;
@@ -5154,7 +5168,7 @@ fn whileExpr(
                 .inst = err_union,
                 .bool_bit = try continue_scope.addUnNode(tag, err_union, node),
             };
-        } else if (while_full.payload_token) |payload_token| {
+        } else if (while_full.payload_token) |_| {
             const cond_rl: ResultLoc = if (payload_is_ref) .ref else .none;
             const optional = try expr(&continue_scope, &continue_scope.base, cond_rl, while_full.ast.cond_expr);
             const tag: Zir.Inst.Tag = if (payload_is_ref) .is_non_null_ptr else .is_non_null;
@@ -6665,6 +6679,7 @@ fn unionInitRlPtr(
     union_type: Zir.Inst.Ref,
     field_name: Zir.Inst.Ref,
 ) InnerError!Zir.Inst.Ref {
+    _ = rl;
     const union_init_ptr = try parent_gz.addPlNode(.union_init_ptr, node, Zir.Inst.UnionInitPtr{
         .result_ptr = result_ptr,
         .union_type = union_type,
@@ -6753,6 +6768,8 @@ fn bitCastRlPtr(
     result_ptr: Zir.Inst.Ref,
     rhs: ast.Node.Index,
 ) InnerError!Zir.Inst.Ref {
+    _ = rl;
+    _ = scope;
     const casted_result_ptr = try gz.addPlNode(.bitcast_result_ptr, node, Zir.Inst.Bin{
         .lhs = dest_type,
         .rhs = result_ptr,
@@ -8013,6 +8030,7 @@ fn rvalue(
     result: Zir.Inst.Ref,
     src_node: ast.Node.Index,
 ) InnerError!Zir.Inst.Ref {
+    _ = scope;
     switch (rl) {
         .none, .none_or_ref => return result,
         .discard => {
src/codegen.zig
@@ -564,7 +564,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                             .r11 = true, // fp
                             .r14 = true, // lr
                         };
-                        inline for (callee_preserved_regs) |reg, i| {
+                        inline for (callee_preserved_regs) |reg| {
                             if (self.register_manager.isRegAllocated(reg)) {
                                 @field(saved_regs, @tagName(reg)) = true;
                             }
@@ -602,7 +602,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                             } else {
                                 if (math.cast(i26, amt)) |offset| {
                                     writeInt(u32, self.code.items[jmp_reloc..][0..4], Instruction.b(.al, offset).toU32());
-                                } else |err| {
+                                } else |_| {
                                     return self.failSymbol("exitlude jump is too large", .{});
                                 }
                             }
@@ -675,7 +675,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                             } else {
                                 if (math.cast(i28, amt)) |offset| {
                                     writeInt(u32, self.code.items[jmp_reloc..][0..4], Instruction.b(offset).toU32());
-                                } else |err| {
+                                } else |_| {
                                     return self.failSymbol("exitlude jump is too large", .{});
                                 }
                             }
@@ -1497,6 +1497,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
             swap_lhs_and_rhs: bool,
             op: ir.Inst.Tag,
         ) !void {
+            _ = src;
             assert(lhs_mcv == .register or rhs_mcv == .register);
 
             const op1 = if (swap_lhs_and_rhs) rhs_mcv.register else lhs_mcv.register;
@@ -1905,6 +1906,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                             try self.genX8664ModRMRegToStack(src, dst_ty, off, src_reg, mr + 0x1);
                         },
                         .immediate => |imm| {
+                            _ = imm;
                             return self.fail(src, "TODO implement x86 ADD/SUB/CMP source immediate", .{});
                         },
                         .embedded_in_code, .memory, .stack_offset => {
@@ -2054,6 +2056,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                             return self.genSetStack(src, dst_ty, off, MCValue{ .register = dst_reg });
                         },
                         .immediate => |imm| {
+                            _ = imm;
                             return self.fail(src, "TODO implement x86 multiply source immediate", .{});
                         },
                         .embedded_in_code, .memory, .stack_offset => {
@@ -2982,14 +2985,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                 .arm, .armeb => {
                     if (math.cast(i26, @intCast(i32, index) - @intCast(i32, self.code.items.len + 8))) |delta| {
                         writeInt(u32, try self.code.addManyAsArray(4), Instruction.b(.al, delta).toU32());
-                    } else |err| {
+                    } else |_| {
                         return self.fail(src, "TODO: enable larger branch offset", .{});
                     }
                 },
                 .aarch64, .aarch64_be, .aarch64_32 => {
                     if (math.cast(i28, @intCast(i32, index) - @intCast(i32, self.code.items.len + 8))) |delta| {
                         writeInt(u32, try self.code.addManyAsArray(4), Instruction.b(delta).toU32());
-                    } else |err| {
+                    } else |_| {
                         return self.fail(src, "TODO: enable larger branch offset", .{});
                     }
                 },
@@ -3307,16 +3310,18 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                         }
                     },
                     .compare_flags_unsigned => |op| {
+                        _ = op;
                         return self.fail(src, "TODO implement set stack variable with compare flags value (unsigned)", .{});
                     },
                     .compare_flags_signed => |op| {
+                        _ = op;
                         return self.fail(src, "TODO implement set stack variable with compare flags value (signed)", .{});
                     },
                     .immediate => {
                         const reg = try self.copyToTmpRegister(src, ty, mcv);
                         return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg });
                     },
-                    .embedded_in_code => |code_offset| {
+                    .embedded_in_code => {
                         return self.fail(src, "TODO implement set stack variable from embedded_in_code", .{});
                     },
                     .register => |reg| {
@@ -3352,7 +3357,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                             else => return self.fail(src, "TODO implement storing other types abi_size={}", .{abi_size}),
                         }
                     },
-                    .memory => |vaddr| {
+                    .memory => {
                         return self.fail(src, "TODO implement set stack variable from memory vaddr", .{});
                     },
                     .stack_offset => |off| {
@@ -3380,10 +3385,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                             else => return self.fail(src, "TODO implement memset", .{}),
                         }
                     },
-                    .compare_flags_unsigned => |op| {
+                    .compare_flags_unsigned => {
                         return self.fail(src, "TODO implement set stack variable with compare flags value (unsigned)", .{});
                     },
-                    .compare_flags_signed => |op| {
+                    .compare_flags_signed => {
                         return self.fail(src, "TODO implement set stack variable with compare flags value (signed)", .{});
                     },
                     .immediate => |x_big| {
@@ -3435,13 +3440,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                             },
                         }
                     },
-                    .embedded_in_code => |code_offset| {
+                    .embedded_in_code => {
                         return self.fail(src, "TODO implement set stack variable from embedded_in_code", .{});
                     },
                     .register => |reg| {
                         try self.genX8664ModRMRegToStack(src, ty, stack_offset, reg, 0x89);
                     },
-                    .memory => |vaddr| {
+                    .memory => {
                         return self.fail(src, "TODO implement set stack variable from memory vaddr", .{});
                     },
                     .stack_offset => |off| {
@@ -3469,17 +3474,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                             else => return self.fail(src, "TODO implement memset", .{}),
                         }
                     },
-                    .compare_flags_unsigned => |op| {
+                    .compare_flags_unsigned => {
                         return self.fail(src, "TODO implement set stack variable with compare flags value (unsigned)", .{});
                     },
-                    .compare_flags_signed => |op| {
+                    .compare_flags_signed => {
                         return self.fail(src, "TODO implement set stack variable with compare flags value (signed)", .{});
                     },
                     .immediate => {
                         const reg = try self.copyToTmpRegister(src, ty, mcv);
                         return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg });
                     },
-                    .embedded_in_code => |code_offset| {
+                    .embedded_in_code => {
                         return self.fail(src, "TODO implement set stack variable from embedded_in_code", .{});
                     },
                     .register => |reg| {
@@ -3511,7 +3516,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                             else => return self.fail(src, "TODO implement storing other types abi_size={}", .{abi_size}),
                         }
                     },
-                    .memory => |vaddr| {
+                    .memory => {
                         return self.fail(src, "TODO implement set stack variable from memory vaddr", .{});
                     },
                     .stack_offset => |off| {
@@ -3842,6 +3847,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                         );
                     },
                     .compare_flags_signed => |op| {
+                        _ = op;
                         return self.fail(src, "TODO set register with compare flags value (signed)", .{});
                     },
                     .immediate => |x| {
@@ -4460,6 +4466,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
                     dummy,
 
                     pub fn allocIndex(self: Register) ?u4 {
+                        _ = self;
                         return null;
                     }
                 };
src/Compilation.zig
@@ -523,6 +523,7 @@ pub const AllErrors = struct {
         errors: *std.ArrayList(Message),
         msg: []const u8,
     ) !void {
+        _ = arena;
         try errors.append(.{ .plain = .{ .msg = msg } });
     }
 
src/link.zig
@@ -517,7 +517,7 @@ pub const File = struct {
                     .target = base.options.target,
                     .output_mode = .Obj,
                 });
-                const o_directory = base.options.module.?.zig_cache_artifact_directory;
+                const o_directory = module.zig_cache_artifact_directory;
                 const full_obj_path = try o_directory.join(arena, &[_][]const u8{obj_basename});
                 break :blk full_obj_path;
             }
src/main.zig
@@ -500,7 +500,7 @@ const Emit = union(enum) {
 };
 
 fn optionalBoolEnvVar(arena: *Allocator, name: []const u8) !bool {
-    if (std.process.getEnvVarOwned(arena, name)) |value| {
+    if (std.process.getEnvVarOwned(arena, name)) |_| {
         return true;
     } else |err| switch (err) {
         error.EnvironmentVariableNotFound => return false,
@@ -2560,7 +2560,7 @@ pub const usage_init =
 ;
 
 pub fn cmdInit(
-    gpa: *Allocator,
+    _: *Allocator,
     arena: *Allocator,
     args: []const []const u8,
     output_mode: std.builtin.OutputMode,
src/Module.zig
@@ -774,7 +774,10 @@ pub const Fn = struct {
         ir.dumpFn(mod, func);
     }
 
-    pub fn deinit(func: *Fn, gpa: *Allocator) void {}
+    pub fn deinit(func: *Fn, gpa: *Allocator) void {
+        _ = func;
+        _ = gpa;
+    }
 };
 
 pub const Var = struct {
@@ -2209,6 +2212,7 @@ comptime {
 }
 
 pub fn astGenFile(mod: *Module, file: *Scope.File, prog_node: *std.Progress.Node) !void {
+    _ = prog_node;
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3128,6 +3132,7 @@ pub const ImportFileResult = struct {
 };
 
 pub fn importPkg(mod: *Module, cur_pkg: *Package, pkg: *Package) !ImportFileResult {
+    _ = cur_pkg;
     const gpa = mod.gpa;
 
     // The resolved path is used as the key in the import table, to detect if
@@ -3384,7 +3389,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) InnerError!vo
     decl.has_align = has_align;
     decl.has_linksection = has_linksection;
     decl.zir_decl_index = @intCast(u32, decl_sub_index);
-    if (decl.getFunction()) |func| {
+    if (decl.getFunction()) |_| {
         switch (mod.comp.bin_file.tag) {
             .coff => {
                 // TODO Implement for COFF
@@ -3753,6 +3758,7 @@ pub fn analyzeExport(
     errdefer de_gop.value_ptr.* = mod.gpa.shrink(de_gop.value_ptr.*, de_gop.value_ptr.len - 1);
 }
 pub fn constInst(mod: *Module, arena: *Allocator, src: LazySrcLoc, typed_value: TypedValue) !*ir.Inst {
+    _ = mod;
     const const_inst = try arena.create(ir.Inst.Constant);
     const_inst.* = .{
         .base = .{
@@ -4121,6 +4127,7 @@ pub fn floatAdd(
     lhs: Value,
     rhs: Value,
 ) !Value {
+    _ = src;
     switch (float_type.tag()) {
         .f16 => {
             @panic("TODO add __trunctfhf2 to compiler-rt");
@@ -4154,6 +4161,7 @@ pub fn floatSub(
     lhs: Value,
     rhs: Value,
 ) !Value {
+    _ = src;
     switch (float_type.tag()) {
         .f16 => {
             @panic("TODO add __trunctfhf2 to compiler-rt");
@@ -4187,6 +4195,7 @@ pub fn floatDiv(
     lhs: Value,
     rhs: Value,
 ) !Value {
+    _ = src;
     switch (float_type.tag()) {
         .f16 => {
             @panic("TODO add __trunctfhf2 to compiler-rt");
@@ -4220,6 +4229,7 @@ pub fn floatMul(
     lhs: Value,
     rhs: Value,
 ) !Value {
+    _ = src;
     switch (float_type.tag()) {
         .f16 => {
             @panic("TODO add __trunctfhf2 to compiler-rt");
@@ -4253,6 +4263,7 @@ pub fn simplePtrType(
     mutable: bool,
     size: std.builtin.TypeInfo.Pointer.Size,
 ) Allocator.Error!Type {
+    _ = mod;
     if (!mutable and size == .Slice and elem_ty.eql(Type.initTag(.u8))) {
         return Type.initTag(.const_slice_u8);
     }
@@ -4287,6 +4298,7 @@ pub fn ptrType(
     @"volatile": bool,
     size: std.builtin.TypeInfo.Pointer.Size,
 ) Allocator.Error!Type {
+    _ = mod;
     assert(host_size == 0 or bit_offset < host_size * 8);
 
     // TODO check if type can be represented by simplePtrType
@@ -4304,6 +4316,7 @@ pub fn ptrType(
 }
 
 pub fn optionalType(mod: *Module, arena: *Allocator, child_type: Type) Allocator.Error!Type {
+    _ = mod;
     switch (child_type.tag()) {
         .single_const_pointer => return Type.Tag.optional_single_const_pointer.create(
             arena,
@@ -4324,6 +4337,7 @@ pub fn arrayType(
     sentinel: ?Value,
     elem_type: Type,
 ) Allocator.Error!Type {
+    _ = mod;
     if (elem_type.eql(Type.initTag(.u8))) {
         if (sentinel) |some| {
             if (some.eql(Value.initTag(.zero))) {
@@ -4354,6 +4368,7 @@ pub fn errorUnionType(
     error_set: Type,
     payload: Type,
 ) Allocator.Error!Type {
+    _ = mod;
     assert(error_set.zigTypeTag() == .ErrorSet);
     if (error_set.eql(Type.initTag(.anyerror)) and payload.eql(Type.initTag(.void))) {
         return Type.initTag(.anyerror_void_error_union);
src/print_env.zig
@@ -5,6 +5,7 @@ const Allocator = std.mem.Allocator;
 const fatal = @import("main.zig").fatal;
 
 pub fn cmdEnv(gpa: *Allocator, args: []const []const u8, stdout: std.fs.File.Writer) !void {
+    _ = args;
     const self_exe_path = try std.fs.selfExePathAlloc(gpa);
     defer gpa.free(self_exe_path);
 
src/print_targets.zig
@@ -17,6 +17,7 @@ pub fn cmdTargets(
     stdout: anytype,
     native_target: Target,
 ) !void {
+    _ = args;
     var zig_lib_directory = introspect.findZigLibDir(allocator) catch |err| {
         fatal("unable to find zig installation directory: {s}\n", .{@errorName(err)});
     };
src/register_manager.zig
@@ -265,6 +265,8 @@ fn MockFunction(comptime Register: type) type {
         }
 
         pub fn spillInstruction(self: *Self, src: LazySrcLoc, reg: Register, inst: *ir.Inst) !void {
+            _ = src;
+            _ = inst;
             try self.spilled.append(self.allocator, reg);
         }
     };
src/Sema.zig
@@ -702,6 +702,7 @@ fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) I
 }
 
 fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+    _ = inst;
     const tracy = trace(@src());
     defer tracy.end();
     return sema.mod.fail(&block.base, sema.src, "TODO implement zirCoerceResultPtr", .{});
@@ -776,6 +777,7 @@ fn zirStructDecl(
 }
 
 fn createTypeName(sema: *Sema, block: *Scope.Block, name_strategy: Zir.Inst.NameStrategy) ![:0]u8 {
+    _ = block;
     switch (name_strategy) {
         .anon => {
             // It would be neat to have "struct:line:column" but this name has
@@ -1067,6 +1069,7 @@ fn zirOpaqueDecl(
     inst: Zir.Inst.Index,
     name_strategy: Zir.Inst.NameStrategy,
 ) InnerError!*Inst {
+    _ = name_strategy;
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -1242,6 +1245,7 @@ fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*In
     // TODO check if arg_name shadows a Decl
 
     if (block.inlining) |inlining| {
+        _ = inlining;
         return sema.param_inst_list[arg_index];
     }
 
@@ -1640,6 +1644,7 @@ fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*In
 }
 
 fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+    _ = block;
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -1648,6 +1653,7 @@ fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*In
 }
 
 fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+    _ = block;
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -1665,6 +1671,7 @@ fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!
 }
 
 fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+    _ = block;
     const arena = sema.arena;
     const inst_data = sema.code.instructions.items(.data)[inst].float;
     const src = inst_data.src();
@@ -1677,6 +1684,7 @@ fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*
 }
 
 fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+    _ = block;
     const arena = sema.arena;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Float128, inst_data.payload_index).data;
@@ -2358,6 +2366,7 @@ fn analyzeCall(
 }
 
 fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+    _ = block;
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -2466,6 +2475,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn
 }
 
 fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+    _ = block;
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -2626,6 +2636,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn
 }
 
 fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+    _ = block;
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3056,6 +3067,7 @@ fn funcCommon(
     src_locs: Zir.Inst.Func.SrcLocs,
     opt_lib_name: ?[]const u8,
 ) InnerError!*Inst {
+    _ = inferred_error_set;
     const src: LazySrcLoc = .{ .node_offset = src_node_offset };
     const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset };
     const return_type = try sema.resolveType(block, ret_ty_src, zir_return_type);
@@ -3492,6 +3504,8 @@ fn zirSwitchCapture(
     is_multi: bool,
     is_ref: bool,
 ) InnerError!*Inst {
+    _ = is_ref;
+    _ = is_multi;
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3509,6 +3523,7 @@ fn zirSwitchCaptureElse(
     inst: Zir.Inst.Index,
     is_ref: bool,
 ) InnerError!*Inst {
+    _ = is_ref;
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4511,12 +4526,15 @@ fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!
 }
 
 fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+    _ = block;
+    _ = inst;
     const tracy = trace(@src());
     defer tracy.end();
     return sema.mod.fail(&block.base, sema.src, "TODO implement zirShl", .{});
 }
 
 fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+    _ = inst;
     const tracy = trace(@src());
     defer tracy.end();
     return sema.mod.fail(&block.base, sema.src, "TODO implement zirShr", .{});
@@ -4586,18 +4604,21 @@ fn zirBitwise(
 }
 
 fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+    _ = inst;
     const tracy = trace(@src());
     defer tracy.end();
     return sema.mod.fail(&block.base, sema.src, "TODO implement zirBitNot", .{});
 }
 
 fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+    _ = inst;
     const tracy = trace(@src());
     defer tracy.end();
     return sema.mod.fail(&block.base, sema.src, "TODO implement zirArrayCat", .{});
 }
 
 fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+    _ = inst;
     const tracy = trace(@src());
     defer tracy.end();
     return sema.mod.fail(&block.base, sema.src, "TODO implement zirArrayMul", .{});
@@ -5059,6 +5080,7 @@ fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
 }
 
 fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+    _ = block;
     const zir_datas = sema.code.instructions.items(.data);
     const inst_data = zir_datas[inst].un_node;
     const src = inst_data.src();
@@ -5067,6 +5089,7 @@ fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!
 }
 
 fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+    _ = block;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const operand_ptr = try sema.resolveInst(inst_data.operand);
@@ -5504,6 +5527,7 @@ fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner
 }
 
 fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst {
+    _ = is_ref;
     const mod = sema.mod;
     const gpa = sema.gpa;
     const zir_datas = sema.code.instructions.items(.data);
@@ -5613,18 +5637,21 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref:
 }
 
 fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst {
+    _ = is_ref;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirStructInitAnon", .{});
 }
 
 fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst {
+    _ = is_ref;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInit", .{});
 }
 
 fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst {
+    _ = is_ref;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInitAnon", .{});
@@ -6021,6 +6048,7 @@ fn zirAwait(
     inst: Zir.Inst.Index,
     is_nosuspend: bool,
 ) InnerError!*Inst {
+    _ = is_nosuspend;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirAwait", .{});
@@ -6302,6 +6330,8 @@ fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: *Inst, panic_id:
 }
 
 fn safetyPanic(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, panic_id: PanicId) !Zir.Inst.Index {
+    _ = sema;
+    _ = panic_id;
     // TODO Once we have a panic function to call, call it here instead of breakpoint.
     _ = try block.addNoOp(src, Type.initTag(.void), .breakpoint);
     _ = try block.addNoOp(src, Type.initTag(.noreturn), .unreach);
@@ -6600,6 +6630,8 @@ fn elemPtrArray(
     elem_index: *Inst,
     elem_index_src: LazySrcLoc,
 ) InnerError!*Inst {
+    _ = elem_index;
+    _ = elem_index_src;
     if (array_ptr.value()) |array_ptr_val| {
         if (elem_index.value()) |index_val| {
             // Both array pointer and index are compile-time known.
@@ -7510,6 +7542,8 @@ fn resolveBuiltinTypeFields(
     ty: Type,
     name: []const u8,
 ) InnerError!Type {
+    _ = ty;
+    _ = name;
     const resolved_ty = try sema.getBuiltinType(block, src, name);
     return sema.resolveTypeFields(block, src, resolved_ty);
 }
src/stage1.zig
@@ -407,6 +407,8 @@ export fn stage2_add_link_lib(
     symbol_name_ptr: [*c]const u8,
     symbol_name_len: usize,
 ) ?[*:0]const u8 {
+    _ = symbol_name_len;
+    _ = symbol_name_ptr;
     const comp = @intToPtr(*Compilation, stage1.userdata);
     const lib_name = std.ascii.allocLowerString(comp.gpa, lib_name_ptr[0..lib_name_len]) catch return "out of memory";
     const target = comp.getTarget();
src/test.zig
@@ -70,6 +70,8 @@ const ErrorMsg = union(enum) {
         options: std.fmt.FormatOptions,
         writer: anytype,
     ) !void {
+        _ = fmt;
+        _ = options;
         switch (self) {
             .src => |src| {
                 return writer.print("{s}:{d}:{d}: {s}: {s}", .{
@@ -592,6 +594,7 @@ pub const TestContext = struct {
         thread_pool: *ThreadPool,
         global_cache_directory: Compilation.Directory,
     ) !void {
+        _ = self;
         const target_info = try std.zig.system.NativeTargetInfo.detect(allocator, case.target);
         const target = target_info.target;
 
src/tracy.zig
@@ -28,7 +28,9 @@ pub const ___tracy_c_zone_context = extern struct {
 };
 
 pub const Ctx = if (enable) ___tracy_c_zone_context else struct {
-    pub fn end(self: Ctx) void {}
+    pub fn end(self: Ctx) void {
+        _ = self;
+    }
 };
 
 pub inline fn trace(comptime src: std.builtin.SourceLocation) Ctx {
src/translate_c.zig
@@ -206,6 +206,7 @@ const Scope = struct {
     }
 
     fn findBlockReturnType(inner: *Scope, c: *Context) clang.QualType {
+        _ = c;
         var scope = inner;
         while (true) {
             switch (scope.id) {
@@ -601,7 +602,7 @@ fn visitFnDecl(c: *Context, fn_decl: *const clang.FunctionDecl) Error!void {
     var scope = &block_scope.base;
 
     var param_id: c_uint = 0;
-    for (proto_node.data.params) |*param, i| {
+    for (proto_node.data.params) |*param| {
         const param_name = param.name orelse {
             proto_node.data.is_extern = true;
             proto_node.data.is_export = false;
@@ -785,7 +786,7 @@ const builtin_typedef_map = std.ComptimeStringMap([]const u8, .{
 });
 
 fn transTypeDef(c: *Context, scope: *Scope, typedef_decl: *const clang.TypedefNameDecl) Error!void {
-    if (c.decl_table.get(@ptrToInt(typedef_decl.getCanonicalDecl()))) |name|
+    if (c.decl_table.get(@ptrToInt(typedef_decl.getCanonicalDecl()))) |_|
         return; // Avoid processing this decl twice
     const toplevel = scope.id == .root;
     const bs: *Scope.Block = if (!toplevel) try scope.findBlockScope(c) else undefined;
@@ -935,7 +936,7 @@ fn hasFlexibleArrayField(c: *Context, record_def: *const clang.RecordDecl) bool
 }
 
 fn transRecordDecl(c: *Context, scope: *Scope, record_decl: *const clang.RecordDecl) Error!void {
-    if (c.decl_table.get(@ptrToInt(record_decl.getCanonicalDecl()))) |name|
+    if (c.decl_table.get(@ptrToInt(record_decl.getCanonicalDecl()))) |_|
         return; // Avoid processing this decl twice
     const record_loc = record_decl.getLocation();
     const toplevel = scope.id == .root;
@@ -1080,7 +1081,7 @@ fn transRecordDecl(c: *Context, scope: *Scope, record_decl: *const clang.RecordD
 }
 
 fn transEnumDecl(c: *Context, scope: *Scope, enum_decl: *const clang.EnumDecl) Error!void {
-    if (c.decl_table.get(@ptrToInt(enum_decl.getCanonicalDecl()))) |name|
+    if (c.decl_table.get(@ptrToInt(enum_decl.getCanonicalDecl()))) |_|
         return; // Avoid processing this decl twice
     const enum_loc = enum_decl.getLocation();
     const toplevel = scope.id == .root;
@@ -1312,6 +1313,7 @@ fn transConvertVectorExpr(
     source_loc: clang.SourceLocation,
     expr: *const clang.ConvertVectorExpr,
 ) TransError!Node {
+    _ = source_loc;
     const base_stmt = @ptrCast(*const clang.Stmt, expr);
 
     var block_scope = try Scope.Block.init(c, scope, true);
@@ -1433,6 +1435,7 @@ fn transSimpleOffsetOfExpr(
     scope: *Scope,
     expr: *const clang.OffsetOfExpr,
 ) TransError!Node {
+    _ = scope;
     assert(expr.getNumComponents() == 1);
     const component = expr.getComponent(0);
     if (component.getKind() == .Field) {
@@ -2269,6 +2272,7 @@ fn transStringLiteralInitializer(
 /// both operands resolve to addresses. The C standard requires that both operands
 /// point to elements of the same array object, but we do not verify that here.
 fn cIsPointerDiffExpr(c: *Context, stmt: *const clang.BinaryOperator) bool {
+    _ = c;
     const lhs = @ptrCast(*const clang.Stmt, stmt.getLHS());
     const rhs = @ptrCast(*const clang.Stmt, stmt.getRHS());
     return stmt.getOpcode() == .Sub and
@@ -2572,6 +2576,7 @@ fn transInitListExprVector(
     expr: *const clang.InitListExpr,
     ty: *const clang.Type,
 ) TransError!Node {
+    _ = ty;
     const qt = getExprQualType(c, @ptrCast(*const clang.Expr, expr));
     const vector_type = try transQualType(c, scope, qt, loc);
     const init_count = expr.getNumInits();
@@ -2721,6 +2726,7 @@ fn transImplicitValueInitExpr(
     expr: *const clang.Expr,
     used: ResultUsed,
 ) TransError!Node {
+    _ = used;
     const source_loc = expr.getBeginLoc();
     const qt = getExprQualType(c, expr);
     const ty = qt.getTypePtr();
@@ -3407,6 +3413,7 @@ fn transUnaryExprOrTypeTraitExpr(
     stmt: *const clang.UnaryExprOrTypeTraitExpr,
     result_used: ResultUsed,
 ) TransError!Node {
+    _ = result_used;
     const loc = stmt.getBeginLoc();
     const type_node = try transQualType(c, scope, stmt.getTypeOfArgument(), loc);
 
@@ -3893,6 +3900,7 @@ fn maybeSuppressResult(
     used: ResultUsed,
     result: Node,
 ) TransError!Node {
+    _ = scope;
     if (used == .used) return result;
     return Tag.discard.create(c.arena, result);
 }
@@ -4337,7 +4345,7 @@ fn transCreateNodeMacroFn(c: *Context, name: []const u8, ref: Node, proto_alias:
     var fn_params = std.ArrayList(ast.Payload.Param).init(c.gpa);
     defer fn_params.deinit();
 
-    for (proto_alias.data.params) |param, i| {
+    for (proto_alias.data.params) |param| {
         const param_name = param.name orelse
             try std.fmt.allocPrint(c.arena, "arg_{d}", .{c.getMangle()});
 
@@ -5653,6 +5661,7 @@ fn parseCSpecifierQualifierList(c: *Context, m: *MacroCtx, scope: *Scope, allow_
 }
 
 fn parseCNumericType(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
+    _ = scope;
     const KwCounter = struct {
         double: u8 = 0,
         long: u8 = 0,
@@ -5754,6 +5763,7 @@ fn parseCNumericType(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node {
 }
 
 fn parseCAbstractDeclarator(c: *Context, m: *MacroCtx, scope: *Scope, node: Node) ParseError!Node {
+    _ = scope;
     switch (m.next().?) {
         .Asterisk => {
             // last token of `node`
src/type.zig
@@ -600,9 +600,11 @@ pub const Type = extern union {
 
     pub const HashContext = struct {
         pub fn hash(self: @This(), t: Type) u64 {
+            _ = self;
             return t.hash();
         }
         pub fn eql(self: @This(), a: Type, b: Type) bool {
+            _ = self;
             return a.eql(b);
         }
     };
@@ -777,6 +779,7 @@ pub const Type = extern union {
         options: std.fmt.FormatOptions,
         writer: anytype,
     ) @TypeOf(writer).Error!void {
+        _ = options;
         comptime assert(fmt.len == 0);
         var ty = start_type;
         while (true) {
src/value.zig
@@ -626,6 +626,7 @@ pub const Value = extern union {
             return std.mem.dupe(allocator, u8, payload.data);
         }
         if (self.castTag(.repeated)) |payload| {
+            _ = payload;
             @panic("TODO implement toAllocatedBytes for this Value tag");
         }
         if (self.castTag(.decl_ref)) |payload| {
@@ -747,6 +748,7 @@ pub const Value = extern union {
 
     /// Asserts the type is an enum type.
     pub fn toEnum(val: Value, enum_ty: Type, comptime E: type) E {
+        _ = enum_ty;
         // TODO this needs to resolve other kinds of Value tags rather than
         // assuming the tag will be .enum_field_index.
         const field_index = val.castTag(.enum_field_index).?.data;
@@ -935,6 +937,7 @@ pub const Value = extern union {
     /// Converts an integer or a float to a float.
     /// Returns `error.Overflow` if the value does not fit in the new type.
     pub fn floatCast(self: Value, allocator: *Allocator, ty: Type, target: Target) !Value {
+        _ = target;
         switch (ty.tag()) {
             .f16 => {
                 @panic("TODO add __trunctfhf2 to compiler-rt");
@@ -1292,17 +1295,21 @@ pub const Value = extern union {
 
     pub const ArrayHashContext = struct {
         pub fn hash(self: @This(), v: Value) u32 {
+            _ = self;
             return v.hash_u32();
         }
         pub fn eql(self: @This(), a: Value, b: Value) bool {
+            _ = self;
             return a.eql(b);
         }
     };
     pub const HashContext = struct {
         pub fn hash(self: @This(), v: Value) u64 {
+            _ = self;
             return v.hash();
         }
         pub fn eql(self: @This(), a: Value, b: Value) bool {
+            _ = self;
             return a.eql(b);
         }
     };
@@ -1345,6 +1352,7 @@ pub const Value = extern union {
     }
 
     pub fn fieldValue(val: Value, allocator: *Allocator, index: usize) error{OutOfMemory}!Value {
+        _ = allocator;
         switch (val.tag()) {
             .@"struct" => {
                 const field_values = val.castTag(.@"struct").?.data;
src/Zir.zig
@@ -4433,6 +4433,7 @@ const Writer = struct {
     }
 
     fn writeInstIndex(self: *Writer, stream: anytype, inst: Inst.Index) !void {
+        _ = self;
         return stream.print("%{d}", .{inst});
     }
 
@@ -4453,6 +4454,7 @@ const Writer = struct {
         name: []const u8,
         flag: bool,
     ) !void {
+        _ = self;
         if (!flag) return;
         try stream.writeAll(name);
     }
test/behavior/bugs/1310.zig
@@ -16,6 +16,8 @@ pub const InvocationTable_ = struct_InvocationTable_;
 pub const VM_ = struct_VM_;
 
 fn agent_callback(_vm: [*]VM, options: [*]u8) callconv(.C) i32 {
+    _ = _vm;
+    _ = options;
     return 11;
 }
 
test/behavior/bugs/2578.zig
@@ -5,7 +5,9 @@ const Foo = struct {
 var foo: Foo = undefined;
 const t = &foo;
 
-fn bar(pointer: ?*c_void) void {}
+fn bar(pointer: ?*c_void) void {
+    _ = pointer;
+}
 
 test "fixed" {
     bar(t);
test/behavior/bugs/2692.zig
@@ -1,4 +1,6 @@
-fn foo(a: []u8) void {}
+fn foo(a: []u8) void {
+    _ = a;
+}
 
 test "address of 0 length array" {
     var pt: [0]u8 = undefined;
test/behavior/bugs/3367.zig
@@ -3,7 +3,9 @@ const Foo = struct {
 };
 
 const Mixin = struct {
-    pub fn two(self: Foo) void {}
+    pub fn two(self: Foo) void {
+        _ = self;
+    }
 };
 
 test "container member access usingnamespace decls" {
test/behavior/bugs/4328.zig
@@ -53,10 +53,12 @@ test "Peer resolution of extern function calls in @TypeOf" {
 test "Extern function calls, dereferences and field access in @TypeOf" {
     const Test = struct {
         fn test_fn_1(a: c_long) @TypeOf(fopen("test", "r").*) {
+            _ = a;
             return .{ .dummy_field = 0 };
         }
 
         fn test_fn_2(a: anytype) @TypeOf(fopen("test", "r").*.dummy_field) {
+            _ = a;
             return 255;
         }
 
test/behavior/bugs/4560.zig
@@ -25,6 +25,10 @@ pub fn StringHashMap(comptime V: type) type {
 }
 
 pub fn HashMap(comptime K: type, comptime V: type) type {
+    if (false) {
+        K;
+        V;
+    }
     return struct {
         size: usize,
         max_distance_from_start_index: usize,
test/behavior/bugs/529_other_file_2.zig
@@ -1,4 +1,6 @@
 pub const A = extern struct {
     field: c_int,
 };
-export fn issue529(a: ?*A) void {}
+export fn issue529(a: ?*A) void {
+    _ = a;
+}
test/behavior/bugs/5487.zig
@@ -1,6 +1,7 @@
 const io = @import("std").io;
 
 pub fn write(_: void, bytes: []const u8) !usize {
+    _ = bytes;
     return 0;
 }
 pub fn writer() io.Writer(void, @typeInfo(@typeInfo(@TypeOf(write)).Fn.return_type.?).ErrorUnion.error_set, write) {
test/behavior/bugs/624.zig
@@ -12,6 +12,7 @@ const ListenerContext = struct {
 const ContextAllocator = MemoryPool(TestContext);
 
 fn MemoryPool(comptime T: type) type {
+    _ = T;
     return struct {
         n: usize,
     };
test/behavior/bugs/679.zig
@@ -2,6 +2,7 @@ const std = @import("std");
 const expect = std.testing.expect;
 
 pub fn List(comptime T: type) type {
+    _ = T;
     return u32;
 }
 
test/behavior/bugs/7027.zig
@@ -9,7 +9,9 @@ const Foobar = struct {
     }
 };
 
-fn foo(arg: anytype) void {}
+fn foo(arg: anytype) void {
+    _ = arg;
+}
 
 test "" {
     comptime var foobar = Foobar.foo();
test/behavior/bugs/704.zig
@@ -1,5 +1,7 @@
 const xxx = struct {
-    pub fn bar(self: *xxx) void {}
+    pub fn bar(self: *xxx) void {
+        _ = self;
+    }
 };
 test "bug 704" {
     var x: xxx = undefined;
test/behavior/bugs/7250.zig
@@ -3,7 +3,9 @@ const nrfx_uart_t = extern struct {
     drv_inst_idx: u8,
 };
 
-pub fn nrfx_uart_rx(p_instance: [*c]const nrfx_uart_t) void {}
+pub fn nrfx_uart_rx(p_instance: [*c]const nrfx_uart_t) void {
+    _ = p_instance;
+}
 
 threadlocal var g_uart0 = nrfx_uart_t{
     .p_reg = 0,
test/behavior/bugs/828.zig
@@ -4,6 +4,7 @@ const CountBy = struct {
     const One = CountBy{ .a = 1 };
 
     pub fn counter(self: *const CountBy) Counter {
+        _ = self;
         return Counter{ .i = 0 };
     }
 };
@@ -18,6 +19,7 @@ const Counter = struct {
 };
 
 fn constCount(comptime cb: *const CountBy, comptime unused: u32) void {
+    _ = unused;
     comptime {
         var cnt = cb.counter();
         if (cnt.i != 0) @compileError("Counter instance reused!");
test/behavior/bugs/920.zig
@@ -46,6 +46,8 @@ fn norm_f_inv(y: f64) f64 {
     return math.sqrt(-2.0 * math.ln(y));
 }
 fn norm_zero_case(random: *Random, u: f64) f64 {
+    _ = random;
+    _ = u;
     return 0.0;
 }
 
test/behavior/align.zig
@@ -167,6 +167,7 @@ test "generic function with align param" {
 }
 
 fn whyWouldYouEverDoThis(comptime align_bytes: u8) align(align_bytes) u8 {
+    _ = align_bytes;
     return 0x1;
 }
 
test/behavior/async_fn.zig
@@ -133,6 +133,7 @@ test "@frameSize" {
             other(1);
         }
         fn other(param: i32) void {
+            _ = param;
             var local: i32 = undefined;
             _ = local;
             suspend {}
@@ -635,6 +636,8 @@ test "returning a const error from async function" {
         }
 
         fn fetchUrl(unused: i32, url: []const u8) ![]u8 {
+            _ = unused;
+            _ = url;
             frame = @frame();
             suspend {}
             ok = true;
@@ -711,6 +714,7 @@ fn testAsyncAwaitTypicalUsage(
 
         var global_download_frame: anyframe = undefined;
         fn fetchUrl(allocator: *std.mem.Allocator, url: []const u8) anyerror![]u8 {
+            _ = url;
             const result = try std.mem.dupe(allocator, u8, "expected download text");
             errdefer allocator.free(result);
             if (suspend_download) {
@@ -724,6 +728,7 @@ fn testAsyncAwaitTypicalUsage(
 
         var global_file_frame: anyframe = undefined;
         fn readFile(allocator: *std.mem.Allocator, filename: []const u8) anyerror![]u8 {
+            _ = filename;
             const result = try std.mem.dupe(allocator, u8, "expected file text");
             errdefer allocator.free(result);
             if (suspend_file) {
@@ -1226,6 +1231,7 @@ test "suspend in while loop" {
                 suspend {}
                 return val;
             } else |err| {
+                err catch {};
                 return 0;
             }
         }
@@ -1355,6 +1361,7 @@ test "async function passed 0-bit arg after non-0-bit arg" {
         }
 
         fn bar(x: i32, args: anytype) anyerror!void {
+            _ = args;
             global_frame = @frame();
             suspend {}
             global_int = x;
@@ -1650,6 +1657,8 @@ test "@asyncCall with pass-by-value arguments" {
         pub const AT = [5]u8;
 
         pub fn f(_fill0: u64, s: ST, _fill1: u64, a: AT, _fill2: u64) callconv(.Async) void {
+            _ = s;
+            _ = a;
             // Check that the array and struct arguments passed by value don't
             // end up overflowing the adjacent fields in the frame structure.
             expectEqual(F0, _fill0) catch @panic("test failure");
@@ -1677,6 +1686,7 @@ test "@asyncCall with arguments having non-standard alignment" {
 
     const S = struct {
         pub fn f(_fill0: u32, s: struct { x: u64 align(16) }, _fill1: u64) callconv(.Async) void {
+            _ = s;
             // The compiler inserts extra alignment for s, check that the
             // generated code picks the right slot for fill1.
             expectEqual(F0, _fill0) catch @panic("test failure");
test/behavior/cast.zig
@@ -824,9 +824,13 @@ test "variable initialization uses result locations properly with regards to the
 test "cast between [*c]T and ?[*:0]T on fn parameter" {
     const S = struct {
         const Handler = ?fn ([*c]const u8) callconv(.C) void;
-        fn addCallback(handler: Handler) void {}
+        fn addCallback(handler: Handler) void {
+            _ = handler;
+        }
 
-        fn myCallback(cstr: ?[*:0]const u8) callconv(.C) void {}
+        fn myCallback(cstr: ?[*:0]const u8) callconv(.C) void {
+            _ = cstr;
+        }
 
         fn doTheTest() void {
             addCallback(myCallback);
test/behavior/error.zig
@@ -139,7 +139,10 @@ test "comptime test error for empty error set" {
 const EmptyErrorSet = error{};
 
 fn testComptimeTestErrorEmptySet(x: EmptyErrorSet!i32) !void {
-    if (x) |v| try expect(v == 1234) else |err| @compileError("bad");
+    if (x) |v| try expect(v == 1234) else |err| {
+        _ = err;
+        @compileError("bad");
+    }
 }
 
 test "syntax: optional operator in front of error union operator" {
@@ -394,6 +397,7 @@ test "function pointer with return type that is error union with payload which i
         const Err = error{UnspecifiedErr};
 
         fn bar(a: i32) anyerror!*Foo {
+            _ = a;
             return Err.UnspecifiedErr;
         }
 
@@ -448,7 +452,9 @@ test "error payload type is correctly resolved" {
 
 test "error union comptime caching" {
     const S = struct {
-        fn foo(comptime arg: anytype) void {}
+        fn foo(comptime arg: anytype) void {
+            arg catch {};
+        }
     };
 
     S.foo(@as(anyerror!void, {}));
test/behavior/eval.zig
@@ -422,6 +422,7 @@ test {
 }
 
 pub fn TypeWithCompTimeSlice(comptime field_name: []const u8) type {
+    _ = field_name;
     return struct {
         pub const Node = struct {};
     };
@@ -698,7 +699,9 @@ test "refer to the type of a generic function" {
     f(i32);
 }
 
-fn doNothingWithType(comptime T: type) void {}
+fn doNothingWithType(comptime T: type) void {
+    _ = T;
+}
 
 test "zero extend from u0 to u1" {
     var zero_u0: u0 = 0;
@@ -819,7 +822,9 @@ test "two comptime calls with array default initialized to undefined" {
                 result.getCpuArch();
             }
 
-            pub fn getCpuArch(self: CrossTarget) void {}
+            pub fn getCpuArch(self: CrossTarget) void {
+                _ = self;
+            }
         };
 
         const DynamicLinker = struct {
test/behavior/fn.zig
@@ -23,6 +23,7 @@ test "void parameters" {
     try voidFun(1, void{}, 2, {});
 }
 fn voidFun(a: i32, b: void, c: i32, d: void) !void {
+    _ = d;
     const v = b;
     const vv: void = if (a == 1) v else {};
     try expect(a + c == 3);
@@ -57,7 +58,9 @@ test "call function with empty string" {
     acceptsString("");
 }
 
-fn acceptsString(foo: []u8) void {}
+fn acceptsString(foo: []u8) void {
+    _ = foo;
+}
 
 fn @"weird function name"() i32 {
     return 1234;
@@ -70,7 +73,9 @@ test "implicit cast function unreachable return" {
     wantsFnWithVoid(fnWithUnreachable);
 }
 
-fn wantsFnWithVoid(f: fn () void) void {}
+fn wantsFnWithVoid(f: fn () void) void {
+    _ = f;
+}
 
 fn fnWithUnreachable() noreturn {
     unreachable;
@@ -162,6 +167,7 @@ const Point3 = struct {
     y: i32,
 
     fn addPointCoords(self: Point3, comptime T: type) i32 {
+        _ = T;
         return self.x + self.y;
     }
 };
test/behavior/for.zig
@@ -29,10 +29,14 @@ test "for loop with pointer elem var" {
     mangleString(target[0..]);
     try expect(mem.eql(u8, &target, "bcdefgh"));
 
-    for (source) |*c, i|
+    for (source) |*c, i| {
+        _ = i;
         try expect(@TypeOf(c) == *const u8);
-    for (target) |*c, i|
+    }
+    for (target) |*c, i| {
+        _ = i;
         try expect(@TypeOf(c) == *u8);
+    }
 }
 
 fn mangleString(s: []u8) void {
@@ -53,6 +57,7 @@ test "basic for loop" {
         buf_index += 1;
     }
     for (array) |item, index| {
+        _ = item;
         buffer[buf_index] = @intCast(u8, index);
         buf_index += 1;
     }
@@ -62,6 +67,7 @@ test "basic for loop" {
         buf_index += 1;
     }
     for (array_ptr) |item, index| {
+        _ = item;
         buffer[buf_index] = @intCast(u8, index);
         buf_index += 1;
     }
@@ -70,7 +76,7 @@ test "basic for loop" {
         buffer[buf_index] = item;
         buf_index += 1;
     }
-    for (unknown_size) |item, index| {
+    for (unknown_size) |_, index| {
         buffer[buf_index] = @intCast(u8, index);
         buf_index += 1;
     }
@@ -118,6 +124,7 @@ test "2 break statements and an else" {
             var buf: [10]u8 = undefined;
             var ok = false;
             ok = for (buf) |item| {
+                _ = item;
                 if (f) break false;
                 if (t) break true;
             } else false;
@@ -136,6 +143,7 @@ test "for with null and T peer types and inferred result location type" {
                     break item;
                 }
             } else null) |v| {
+                _ = v;
                 @panic("fail");
             }
         }
test/behavior/if.zig
@@ -45,7 +45,7 @@ var global_with_err: anyerror!u32 = error.SomeError;
 test "unwrap mutable global var" {
     if (global_with_val) |v| {
         try expect(v == 0);
-    } else |e| {
+    } else |_| {
         unreachable;
     }
     if (global_with_err) |_| {
test/behavior/misc.zig
@@ -245,14 +245,18 @@ var some_mem: [100]u8 = undefined;
 fn memAlloc(comptime T: type, n: usize) anyerror![]T {
     return @ptrCast([*]T, &some_mem[0])[0..n];
 }
-fn memFree(comptime T: type, memory: []T) void {}
+fn memFree(comptime T: type, memory: []T) void {
+    _ = memory;
+}
 
 test "cast undefined" {
     const array: [100]u8 = undefined;
     const slice = @as([]const u8, &array);
     testCastUndefined(slice);
 }
-fn testCastUndefined(x: []const u8) void {}
+fn testCastUndefined(x: []const u8) void {
+    _ = x;
+}
 
 test "cast small unsigned to larger signed" {
     try expect(castSmallUnsignedToLargerSigned1(200) == @as(i16, 200));
@@ -452,6 +456,7 @@ test "@typeName" {
 }
 
 fn TypeFromFn(comptime T: type) type {
+    _ = T;
     return struct {};
 }
 
@@ -555,7 +560,12 @@ test "packed struct, enum, union parameters in extern function" {
     }), &(PackedUnion{ .a = 1 }));
 }
 
-export fn testPackedStuff(a: *const PackedStruct, b: *const PackedUnion) void {}
+export fn testPackedStuff(a: *const PackedStruct, b: *const PackedUnion) void {
+    if (false) {
+        a;
+        b;
+    }
+}
 
 test "slicing zero length array" {
     const s1 = ""[0..];
@@ -584,6 +594,7 @@ test "self reference through fn ptr field" {
         };
 
         fn foo(a: A) u8 {
+            _ = a;
             return 12;
         }
     };
@@ -753,7 +764,9 @@ test "extern variable with non-pointer opaque type" {
 
 test "lazy typeInfo value as generic parameter" {
     const S = struct {
-        fn foo(args: anytype) void {}
+        fn foo(args: anytype) void {
+            _ = args;
+        }
     };
     S.foo(@typeInfo(@TypeOf(.{})));
 }
test/behavior/null.zig
@@ -130,6 +130,7 @@ var struct_with_optional: StructWithOptional = undefined;
 test "unwrap optional which is field of global var" {
     struct_with_optional.field = null;
     if (struct_with_optional.field) |payload| {
+        _ = payload;
         unreachable;
     }
     struct_with_optional.field = 1234;
test/behavior/optional.zig
@@ -161,6 +161,7 @@ test "self-referential struct through a slice of optional" {
 test "assigning to an unwrapped optional field in an inline loop" {
     comptime var maybe_pos_arg: ?comptime_int = null;
     inline for ("ab") |x| {
+        _ = x;
         maybe_pos_arg = 0;
         if (maybe_pos_arg.? != 0) {
             @compileError("bad");
test/behavior/pointers.zig
@@ -179,6 +179,7 @@ test "assign null directly to C pointer and test null equality" {
     try expect(!(x != null));
     try expect(!(null != x));
     if (x) |same_x| {
+        _ = same_x;
         @panic("fail");
     }
     var otherx: i32 = undefined;
@@ -189,7 +190,10 @@ test "assign null directly to C pointer and test null equality" {
     comptime try expect(null == y);
     comptime try expect(!(y != null));
     comptime try expect(!(null != y));
-    if (y) |same_y| @panic("fail");
+    if (y) |same_y| {
+        _ = same_y;
+        @panic("fail");
+    }
     const othery: i32 = undefined;
     comptime try expect((y orelse &othery) == &othery);
 
test/behavior/reflection.zig
@@ -15,6 +15,11 @@ test "reflection: function return type, var args, and param types" {
 }
 
 fn dummy(a: bool, b: i32, c: f32) i32 {
+    if (false) {
+        a;
+        b;
+        c;
+    }
     return 1234;
 }
 
test/behavior/struct.zig
@@ -182,6 +182,7 @@ test "empty struct method call" {
 }
 const EmptyStruct = struct {
     fn method(es: *const EmptyStruct) i32 {
+        _ = es;
         return 1234;
     }
 };
@@ -452,9 +453,11 @@ fn alloc(comptime T: type) []T {
 test "call method with mutable reference to struct with no fields" {
     const S = struct {
         fn doC(s: *const @This()) bool {
+            _ = s;
             return true;
         }
         fn do(s: *@This()) bool {
+            _ = s;
             return true;
         }
     };
@@ -625,11 +628,13 @@ test "for loop over pointers to struct, getting field from struct pointer" {
         var ok = true;
 
         fn eql(a: []const u8) bool {
+            _ = a;
             return true;
         }
 
         const ArrayList = struct {
             fn toSlice(self: *ArrayList) []*Foo {
+                _ = self;
                 return @as([*]*Foo, undefined)[0..0];
             }
         };
test/behavior/switch.zig
@@ -386,6 +386,7 @@ test "switch with null and T peer types and inferred result location type" {
                 0 => true,
                 else => null,
             }) |v| {
+                _ = v;
                 @panic("fail");
             }
         }
@@ -411,12 +412,18 @@ test "switch prongs with cases with identical payload types" {
                     try expect(@TypeOf(e) == usize);
                     try expect(e == 8);
                 },
-                .B => |e| @panic("fail"),
+                .B => |e| {
+                    _ = e;
+                    @panic("fail");
+                },
             }
         }
         fn doTheSwitch2(u: Union) !void {
             switch (u) {
-                .A, .C => |e| @panic("fail"),
+                .A, .C => |e| {
+                    _ = e;
+                    @panic("fail");
+                },
                 .B => |e| {
                     try expect(@TypeOf(e) == isize);
                     try expect(e == -8);
@@ -508,7 +515,10 @@ test "switch on error set with single else" {
         fn doTheTest() !void {
             var some: error{Foo} = error.Foo;
             try expect(switch (some) {
-                else => |a| true,
+                else => |a| blk: {
+                    a catch {};
+                    break :blk true;
+                },
             });
         }
     };
test/behavior/type.zig
@@ -431,6 +431,10 @@ test "Type.Fn" {
 
     const foo = struct {
         fn func(a: usize, b: bool) align(4) callconv(.C) usize {
+            if (false) {
+                a;
+                b;
+            }
             return 0;
         }
     }.func;
@@ -444,7 +448,9 @@ test "Type.BoundFn" {
     if (builtin.target.cpu.arch == .wasm32 or builtin.target.cpu.arch == .wasm64) return error.SkipZigTest;
 
     const TestStruct = packed struct {
-        pub fn foo(self: *const @This()) align(4) callconv(.Unspecified) void {}
+        pub fn foo(self: *const @This()) align(4) callconv(.Unspecified) void {
+            _ = self;
+        }
     };
     const test_instance: TestStruct = undefined;
     try testing.expect(std.meta.eql(
test/behavior/type_info.zig
@@ -277,7 +277,9 @@ const TestStruct = packed struct {
     fieldC: *Self,
     fieldD: u32 = 4,
 
-    pub fn foo(self: *const Self) void {}
+    pub fn foo(self: *const Self) void {
+        _ = self;
+    }
     const Self = @This();
 };
 
@@ -326,7 +328,9 @@ extern fn fooAligned(a: usize, b: bool, ...) align(4) callconv(.C) usize;
 
 test "typeInfo with comptime parameter in struct fn def" {
     const S = struct {
-        pub fn func(comptime x: f32) void {}
+        pub fn func(comptime x: f32) void {
+            _ = x;
+        }
     };
     comptime var info = @typeInfo(S);
     _ = info;
@@ -369,6 +373,7 @@ test "type info: pass to function" {
 }
 
 fn passTypeInfo(comptime info: TypeInfo) type {
+    _ = info;
     return void;
 }
 
test/behavior/underscore.zig
@@ -7,7 +7,9 @@ test "ignore lval with underscore" {
 
 test "ignore lval with underscore (for loop)" {
     for ([_]void{}) |_, i| {
+        _ = i;
         for ([_]void{}) |_, j| {
+            _ = j;
             break;
         }
         break;
test/behavior/union.zig
@@ -374,7 +374,9 @@ const Attribute = union(enum) {
     B: u8,
 };
 
-fn setAttribute(attr: Attribute) void {}
+fn setAttribute(attr: Attribute) void {
+    _ = attr;
+}
 
 fn Setter(attr: Attribute) type {
     return struct {
@@ -465,7 +467,9 @@ test "union no tag with struct member" {
     const Struct = struct {};
     const Union = union {
         s: Struct,
-        pub fn foo(self: *@This()) void {}
+        pub fn foo(self: *@This()) void {
+            _ = self;
+        }
     };
     var u = Union{ .s = Struct{} };
     u.foo();
@@ -703,6 +707,7 @@ test "method call on an empty union" {
             X2: [0]u8,
 
             pub fn useIt(self: *@This()) bool {
+                _ = self;
                 return true;
             }
         };
@@ -771,6 +776,7 @@ test "@unionInit on union w/ tag but no fields" {
             no_op: void,
 
             pub fn decode(buf: []const u8) Data {
+                _ = buf;
                 return @unionInit(Data, "no_op", {});
             }
         };
test/behavior/var_args.zig
@@ -48,6 +48,7 @@ test "runtime parameter before var args" {
 }
 
 fn extraFn(extra: u32, args: anytype) !usize {
+    _ = extra;
     if (args.len >= 1) {
         try expect(args[0] == false);
     }
@@ -63,9 +64,11 @@ const foos = [_]fn (anytype) bool{
 };
 
 fn foo1(args: anytype) bool {
+    _ = args;
     return true;
 }
 fn foo2(args: anytype) bool {
+    _ = args;
     return false;
 }
 
test/behavior/while.zig
@@ -151,14 +151,14 @@ test "while on optional with else result follow break prong" {
 test "while on error union with else result follow else prong" {
     const result = while (returnError()) |value| {
         break value;
-    } else |err| @as(i32, 2);
+    } else |_| @as(i32, 2);
     try expect(result == 2);
 }
 
 test "while on error union with else result follow break prong" {
     const result = while (returnSuccess(10)) |value| {
         break value;
-    } else |err| @as(i32, 2);
+    } else |_| @as(i32, 2);
     try expect(result == 10);
 }
 
test/stage2/cbe.zig
@@ -823,31 +823,35 @@ pub fn addCases(ctx: *TestContext) !void {
         \\
     );
     ctx.h("header with single param function", linux_x64,
-        \\export fn start(a: u8) void{}
+        \\export fn start(a: u8) void{
+        \\    _ = a;
+        \\}
     ,
         \\ZIG_EXTERN_C void start(uint8_t a0);
         \\
     );
     ctx.h("header with multiple param function", linux_x64,
-        \\export fn start(a: u8, b: u8, c: u8) void{}
+        \\export fn start(a: u8, b: u8, c: u8) void{
+        \\    _ = a; _ = b; _ = c;
+        \\}
     ,
         \\ZIG_EXTERN_C void start(uint8_t a0, uint8_t a1, uint8_t a2);
         \\
     );
     ctx.h("header with u32 param function", linux_x64,
-        \\export fn start(a: u32) void{}
+        \\export fn start(a: u32) void{ _ = a; }
     ,
         \\ZIG_EXTERN_C void start(uint32_t a0);
         \\
     );
     ctx.h("header with usize param function", linux_x64,
-        \\export fn start(a: usize) void{}
+        \\export fn start(a: usize) void{ _ = a; }
     ,
         \\ZIG_EXTERN_C void start(uintptr_t a0);
         \\
     );
     ctx.h("header with bool param function", linux_x64,
-        \\export fn start(a: bool) void{}
+        \\export fn start(a: bool) void{ _ = a; }
     ,
         \\ZIG_EXTERN_C void start(bool a0);
         \\
@@ -871,7 +875,7 @@ pub fn addCases(ctx: *TestContext) !void {
         \\
     );
     ctx.h("header with multiple includes", linux_x64,
-        \\export fn start(a: u32, b: usize) void{}
+        \\export fn start(a: u32, b: usize) void{ _ = a; _ = b; }
     ,
         \\ZIG_EXTERN_C void start(uint32_t a0, uintptr_t a1);
         \\
test/stage2/test.zig
@@ -1392,7 +1392,9 @@ pub fn addCases(ctx: *TestContext) !void {
             \\pub fn main() void {
             \\    doNothing(0);
             \\}
-            \\fn doNothing(arg: u0) void {}
+            \\fn doNothing(arg: u0) void {
+            \\    _ = arg;
+            \\}
         ,
             "",
         );
test/stage2/wasm.zig
@@ -64,7 +64,7 @@ pub fn addCases(ctx: *TestContext) !void {
             \\    foo(10, 20);
             \\    return 5;
             \\}
-            \\fn foo(x: u32, y: u32) void {}
+            \\fn foo(x: u32, y: u32) void { _ = x; _ = y; }
         , "5\n");
     }
 
@@ -95,6 +95,7 @@ pub fn addCases(ctx: *TestContext) !void {
             \\    return i;
             \\}
             \\fn foo(x: u32, y: u32) void {
+            \\    _ = y;
             \\    var i: u32 = 10;
             \\    i = x;
             \\}
test/standalone/hello_world/hello_libc.zig
@@ -8,6 +8,8 @@ const c = @cImport({
 const msg = "Hello, world!\n";
 
 pub export fn main(argc: c_int, argv: **u8) c_int {
+    _ = argv;
+    _ = argc;
     if (c.printf(msg) != @intCast(c_int, c.strlen(msg))) return -1;
     return 0;
 }
test/standalone/issue_339/test.zig
@@ -1,5 +1,7 @@
 const StackTrace = @import("std").builtin.StackTrace;
 pub fn panic(msg: []const u8, stack_trace: ?*StackTrace) noreturn {
+    _ = msg;
+    _ = stack_trace;
     @breakpoint();
     while (true) {}
 }
test/standalone/issue_8550/main.zig
@@ -1,6 +1,8 @@
-export fn main(r0: u32, r1: u32, atags: u32) callconv(.C) noreturn {
+export fn main() callconv(.C) noreturn {
     unreachable; // never gets run so it doesn't matter
 }
 pub fn panic(msg: []const u8, error_return_trace: ?*@import("std").builtin.StackTrace) noreturn {
+    _ = msg;
+    _ = error_return_trace;
     while (true) {}
 }
test/tests.zig
@@ -417,6 +417,8 @@ pub fn addStandaloneTests(b: *build.Builder, test_filter: ?[]const u8, modes: []
 }
 
 pub fn addCliTests(b: *build.Builder, test_filter: ?[]const u8, modes: []const Mode) *build.Step {
+    _ = test_filter;
+    _ = modes;
     const step = b.step("test-cli", "Test the command line interface");
 
     const exe = b.addExecutable("test-cli", "test/cli.zig");
tools/process_headers.zig
@@ -236,12 +236,14 @@ const DestTarget = struct {
 
     const HashContext = struct {
         pub fn hash(self: @This(), a: DestTarget) u32 {
+            _ = self;
             return @enumToInt(a.arch) +%
                 (@enumToInt(a.os) *% @as(u32, 4202347608)) +%
                 (@enumToInt(a.abi) *% @as(u32, 4082223418));
         }
 
         pub fn eql(self: @This(), a: DestTarget, b: DestTarget) bool {
+            _ = self;
             return a.arch.eql(b.arch) and
                 a.os == b.os and
                 a.abi == b.abi;
@@ -256,6 +258,7 @@ const Contents = struct {
     is_generic: bool,
 
     fn hitCountLessThan(context: void, lhs: *const Contents, rhs: *const Contents) bool {
+        _ = context;
         return lhs.hit_count < rhs.hit_count;
     }
 };
tools/update_clang_options.zig
@@ -585,6 +585,8 @@ const Syntax = union(enum) {
         options: std.fmt.FormatOptions,
         out_stream: anytype,
     ) !void {
+        _ = fmt;
+        _ = options;
         switch (self) {
             .multi_arg => |n| return out_stream.print(".{{.{s}={}}}", .{ @tagName(self), n }),
             else => return out_stream.print(".{s}", .{@tagName(self)}),
@@ -663,6 +665,7 @@ fn syntaxMatchesWithEql(syntax: Syntax) bool {
 }
 
 fn objectLessThan(context: void, a: *json.ObjectMap, b: *json.ObjectMap) bool {
+    _ = context;
     // Priority is determined by exact matches first, followed by prefix matches in descending
     // length, with key as a final tiebreaker.
     const a_syntax = objSyntax(a);
tools/update_cpu_features.zig
@@ -1227,14 +1227,17 @@ fn usageAndExit(file: fs.File, arg0: []const u8, code: u8) noreturn {
 }
 
 fn featureLessThan(context: void, a: Feature, b: Feature) bool {
+    _ = context;
     return std.ascii.lessThanIgnoreCase(a.zig_name, b.zig_name);
 }
 
 fn cpuLessThan(context: void, a: Cpu, b: Cpu) bool {
+    _ = context;
     return std.ascii.lessThanIgnoreCase(a.zig_name, b.zig_name);
 }
 
 fn asciiLessThan(context: void, a: []const u8, b: []const u8) bool {
+    _ = context;
     return std.ascii.lessThanIgnoreCase(a, b);
 }
 
tools/update_glibc.zig
@@ -155,7 +155,7 @@ pub fn main() !void {
         }
         const fn_set = &target_funcs_gop.value_ptr.list;
 
-        for (lib_names) |lib_name, lib_name_index| {
+        for (lib_names) |lib_name| {
             const lib_prefix = if (std.mem.eql(u8, lib_name, "ld")) "" else "lib";
             const basename = try fmt.allocPrint(allocator, "{s}{s}.abilist", .{ lib_prefix, lib_name });
             const abi_list_filename = blk: {
@@ -263,7 +263,7 @@ pub fn main() !void {
 
     // Now the mapping of version and function to integer index is complete.
     // Here we create a mapping of function name to list of versions.
-    for (abi_lists) |*abi_list, abi_index| {
+    for (abi_lists) |*abi_list| {
         const value = target_functions.getPtr(@ptrToInt(abi_list)).?;
         const fn_vers_list = &value.fn_vers_list;
         for (value.list.items) |*ver_fn| {
@@ -286,7 +286,7 @@ pub fn main() !void {
         const abilist_txt = buffered.writer();
 
         // first iterate over the abi lists
-        for (abi_lists) |*abi_list, abi_index| {
+        for (abi_lists) |*abi_list| {
             const fn_vers_list = &target_functions.getPtr(@ptrToInt(abi_list)).?.fn_vers_list;
             for (abi_list.targets) |target, it_i| {
                 if (it_i != 0) try abilist_txt.writeByte(' ');
@@ -312,10 +312,12 @@ pub fn main() !void {
 }
 
 pub fn strCmpLessThan(context: void, a: []const u8, b: []const u8) bool {
+    _ = context;
     return std.mem.order(u8, a, b) == .lt;
 }
 
 pub fn versionLessThan(context: void, a: []const u8, b: []const u8) bool {
+    _ = context;
     const sep_chars = "GLIBC_.";
     var a_tokens = std.mem.tokenize(a, sep_chars);
     var b_tokens = std.mem.tokenize(b, sep_chars);
tools/update_spirv_features.zig
@@ -37,6 +37,7 @@ const Version = struct {
     }
 
     fn lessThan(ctx: void, a: Version, b: Version) bool {
+        _ = ctx;
         return if (a.major == b.major)
             a.minor < b.minor
         else