Commit 538d9a5dd8

Vexu <git@vexu.eu>
2020-02-24 22:39:03
remove uses of `@ArgType` and `@IntType`; replace them with `@typeInfo` and the new `std.meta.IntType`
1 parent 3458fb8
doc/langref.html.in
@@ -550,7 +550,7 @@ pub fn main() void {
       {#syntax#}i7{#endsyntax#} refers to a signed 7-bit integer. The maximum allowed bit-width of an
       integer type is {#syntax#}65535{#endsyntax#}.
       </p>
-      {#see_also|Integers|Floats|void|Errors|@IntType#}
+      {#see_also|Integers|Floats|void|Errors|@Type#}
       {#header_close#}
       {#header_open|Primitive Values#}
       <div class="table-wrapper">
@@ -6667,18 +6667,6 @@ comptime {
       </p>
       {#see_also|Alignment#}
       {#header_close#}
-      {#header_open|@ArgType#}
-      <pre>{#syntax#}@ArgType(comptime T: type, comptime n: usize) type{#endsyntax#}</pre>
-      <p>
-      This builtin function takes a function type and returns the type of the parameter at index {#syntax#}n{#endsyntax#}.
-      </p>
-      <p>
-      {#syntax#}T{#endsyntax#} must be a function type.
-      </p>
-      <p>
-      Note: This function is deprecated. Use {#link|@typeInfo#} instead.
-      </p>
-      {#header_close#}
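For reference, the replacement pattern this commit uses for `@ArgType` reads the parameter type out of `@typeInfo` (see lib/std/thread.zig and test/stage1/behavior/reflection.zig below). A minimal sketch, with an illustrative function and index:

    const std = @import("std");
    const expect = std.testing.expect;

    fn add(a: i32, b: i32) i32 {
        return a + b;
    }

    test "parameter type via @typeInfo" {
        // Formerly @ArgType(@TypeOf(add), 1).
        const T = @typeInfo(@TypeOf(add)).Fn.args[1].arg_type.?;
        expect(T == i32);
    }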
 
       {#header_open|@as#}
       <pre>{#syntax#}@as(comptime T: type, expression) T{#endsyntax#}</pre>
@@ -7337,7 +7325,7 @@ test "main" {
       {#header_close#}
 
       {#header_open|@errorToInt#}
-      <pre>{#syntax#}@errorToInt(err: var) @IntType(false, @sizeOf(anyerror) * 8){#endsyntax#}</pre>
+      <pre>{#syntax#}@errorToInt(err: var) std.meta.IntType(false, @sizeOf(anyerror) * 8){#endsyntax#}</pre>
       <p>
       Supports the following types:
       </p>
@@ -7631,7 +7619,7 @@ test "@hasDecl" {
       {#header_close#}
 
       {#header_open|@intToError#}
-      <pre>{#syntax#}@intToError(value: @IntType(false, @sizeOf(anyerror) * 8)) anyerror{#endsyntax#}</pre>
+      <pre>{#syntax#}@intToError(value: std.meta.IntType(false, @sizeOf(anyerror) * 8)) anyerror{#endsyntax#}</pre>
       <p>
       Converts from the integer representation of an error into {#link|The Global Error Set#} type.
       </p>
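The two builtins still round-trip as before; only the spelling of the integer representation type changes. A usage sketch, not part of the commit:

    const std = @import("std");
    const expect = std.testing.expect;

    test "error <-> integer round trip" {
        const ErrInt = std.meta.IntType(false, @sizeOf(anyerror) * 8);
        const n: ErrInt = @errorToInt(error.FileNotFound);
        expect(@intToError(n) == error.FileNotFound);
    }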
@@ -7664,17 +7652,6 @@ test "@hasDecl" {
       </p>
       {#header_close#}
 
-      {#header_open|@IntType#}
-      <pre>{#syntax#}@IntType(comptime is_signed: bool, comptime bit_count: u16) type{#endsyntax#}</pre>
-      <p>
-      This function returns an integer type with the given signness and bit count. The maximum
-      bit count for an integer type is {#syntax#}65535{#endsyntax#}.
-      </p>
-      <p>
-      Deprecated. Use {#link|@Type#}.
-      </p>
-      {#header_close#}
-
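The removed section pointed readers at `@Type`; for the common integer case this commit also adds a `std.meta.IntType` wrapper (see lib/std/meta.zig below). A sketch of both spellings, assuming `TypeInfo` lives in `std.builtin` as aliased in that file:

    const std = @import("std");
    const expect = std.testing.expect;

    test "replacements for @IntType" {
        // Formerly @IntType(true, 7).
        const A = std.meta.IntType(true, 7);
        const B = @Type(std.builtin.TypeInfo{
            .Int = .{ .is_signed = true, .bits = 7 },
        });
        expect(A == i7);
        expect(B == i7);
    }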
       {#header_open|@memcpy#}
       <pre>{#syntax#}@memcpy(noalias dest: [*]u8, noalias source: [*]const u8, byte_count: usize){#endsyntax#}</pre>
       <p>
lib/std/debug/leb128.zig
@@ -2,7 +2,7 @@ const std = @import("std");
 const testing = std.testing;
 
 pub fn readULEB128(comptime T: type, in_stream: var) !T {
-    const ShiftT = @IntType(false, std.math.log2(T.bit_count));
+    const ShiftT = std.meta.IntType(false, std.math.log2(T.bit_count));
 
     var result: T = 0;
     var shift: usize = 0;
@@ -27,7 +27,7 @@ pub fn readULEB128(comptime T: type, in_stream: var) !T {
 }
 
 pub fn readULEB128Mem(comptime T: type, ptr: *[*]const u8) !T {
-    const ShiftT = @IntType(false, std.math.log2(T.bit_count));
+    const ShiftT = std.meta.IntType(false, std.math.log2(T.bit_count));
 
     var result: T = 0;
     var shift: usize = 0;
@@ -55,8 +55,8 @@ pub fn readULEB128Mem(comptime T: type, ptr: *[*]const u8) !T {
 }
 
 pub fn readILEB128(comptime T: type, in_stream: var) !T {
-    const UT = @IntType(false, T.bit_count);
-    const ShiftT = @IntType(false, std.math.log2(T.bit_count));
+    const UT = std.meta.IntType(false, T.bit_count);
+    const ShiftT = std.meta.IntType(false, std.math.log2(T.bit_count));
 
     var result: UT = 0;
     var shift: usize = 0;
@@ -87,8 +87,8 @@ pub fn readILEB128(comptime T: type, in_stream: var) !T {
 }
 
 pub fn readILEB128Mem(comptime T: type, ptr: *[*]const u8) !T {
-    const UT = @IntType(false, T.bit_count);
-    const ShiftT = @IntType(false, std.math.log2(T.bit_count));
+    const UT = std.meta.IntType(false, T.bit_count);
+    const ShiftT = std.meta.IntType(false, std.math.log2(T.bit_count));
 
     var result: UT = 0;
     var shift: usize = 0;
lib/std/fmt/parse_float.zig
@@ -393,7 +393,7 @@ test "fmt.parseFloat" {
     const epsilon = 1e-7;
 
     inline for ([_]type{ f16, f32, f64, f128 }) |T| {
-        const Z = @IntType(false, T.bit_count);
+        const Z = std.meta.IntType(false, T.bit_count);
 
         testing.expectError(error.InvalidCharacter, parseFloat(T, ""));
         testing.expectError(error.InvalidCharacter, parseFloat(T, "   1"));
lib/std/hash/auto_hash.zig
@@ -93,7 +93,7 @@ pub fn hash(hasher: var, key: var, comptime strat: HashStrategy) void {
         // TODO Check if the situation is better after #561 is resolved.
         .Int => @call(.{ .modifier = .always_inline }, hasher.update, .{std.mem.asBytes(&key)}),
 
-        .Float => |info| hash(hasher, @bitCast(@IntType(false, info.bits), key), strat),
+        .Float => |info| hash(hasher, @bitCast(std.meta.IntType(false, info.bits), key), strat),
 
         .Bool => hash(hasher, @boolToInt(key), strat),
         .Enum => hash(hasher, @enumToInt(key), strat),
lib/std/hash/wyhash.zig
@@ -10,7 +10,7 @@ const primes = [_]u64{
 };
 
 fn read_bytes(comptime bytes: u8, data: []const u8) u64 {
-    const T = @IntType(false, 8 * bytes);
+    const T = std.meta.IntType(false, 8 * bytes);
     return mem.readIntSliceLittle(T, data[0..bytes]);
 }
 
lib/std/io/test.zig
@@ -318,6 +318,7 @@ test "BitStreams with File Stream" {
 }
 
 fn testIntSerializerDeserializer(comptime endian: builtin.Endian, comptime packing: io.Packing) !void {
+    @setEvalBranchQuota(1500);
     //@NOTE: if this test is taking too long, reduce the maximum tested bitsize
     const max_test_bitsize = 128;
 
@@ -341,8 +342,8 @@ fn testIntSerializerDeserializer(comptime endian: builtin.Endian, comptime packi
 
     comptime var i = 0;
     inline while (i <= max_test_bitsize) : (i += 1) {
-        const U = @IntType(false, i);
-        const S = @IntType(true, i);
+        const U = std.meta.IntType(false, i);
+        const S = std.meta.IntType(true, i);
         try serializer.serializeInt(@as(U, i));
         if (i != 0) try serializer.serializeInt(@as(S, -1)) else try serializer.serialize(@as(S, 0));
     }
@@ -350,8 +351,8 @@ fn testIntSerializerDeserializer(comptime endian: builtin.Endian, comptime packi
 
     i = 0;
     inline while (i <= max_test_bitsize) : (i += 1) {
-        const U = @IntType(false, i);
-        const S = @IntType(true, i);
+        const U = std.meta.IntType(false, i);
+        const S = std.meta.IntType(true, i);
         const x = try deserializer.deserializeInt(U);
         const y = try deserializer.deserializeInt(S);
         expect(x == @as(U, i));
lib/std/math/big/int.zig
@@ -9,7 +9,7 @@ const maxInt = std.math.maxInt;
 const minInt = std.math.minInt;
 
 pub const Limb = usize;
-pub const DoubleLimb = @IntType(false, 2 * Limb.bit_count);
+pub const DoubleLimb = std.meta.IntType(false, 2 * Limb.bit_count);
 pub const Log2Limb = math.Log2Int(Limb);
 
 comptime {
@@ -268,7 +268,7 @@ pub const Int = struct {
 
         switch (@typeInfo(T)) {
             .Int => |info| {
-                const UT = if (T.is_signed) @IntType(false, T.bit_count - 1) else T;
+                const UT = if (T.is_signed) std.meta.IntType(false, T.bit_count - 1) else T;
 
                 try self.ensureCapacity(@sizeOf(UT) / @sizeOf(Limb));
                 self.metadata = 0;
@@ -331,7 +331,7 @@ pub const Int = struct {
     pub fn to(self: Int, comptime T: type) ConvertError!T {
         switch (@typeInfo(T)) {
             .Int => {
-                const UT = @IntType(false, T.bit_count);
+                const UT = std.meta.IntType(false, T.bit_count);
 
                 if (self.bitCountTwosComp() > T.bit_count) {
                     return error.TargetTooSmall;
lib/std/math/big/rational.zig
@@ -128,7 +128,7 @@ pub const Rational = struct {
         // Translated from golang.go/src/math/big/rat.go.
         debug.assert(@typeInfo(T) == .Float);
 
-        const UnsignedIntType = @IntType(false, T.bit_count);
+        const UnsignedIntType = std.meta.IntType(false, T.bit_count);
         const f_bits = @bitCast(UnsignedIntType, f);
 
         const exponent_bits = math.floatExponentBits(T);
@@ -187,7 +187,7 @@ pub const Rational = struct {
         debug.assert(@typeInfo(T) == .Float);
 
         const fsize = T.bit_count;
-        const BitReprType = @IntType(false, T.bit_count);
+        const BitReprType = std.meta.IntType(false, T.bit_count);
 
         const msize = math.floatMantissaBits(T);
         const msize1 = msize + 1;
@@ -462,7 +462,7 @@ pub const Rational = struct {
     }
 };
 
-const SignedDoubleLimb = @IntType(true, DoubleLimb.bit_count);
+const SignedDoubleLimb = std.meta.IntType(true, DoubleLimb.bit_count);
 
 fn gcd(rma: *Int, x: Int, y: Int) !void {
     rma.assertWritable();
lib/std/math/cos.zig
@@ -44,7 +44,7 @@ const pi4c = 2.69515142907905952645E-15;
 const m4pi = 1.273239544735162542821171882678754627704620361328125;
 
 fn cos_(comptime T: type, x_: T) T {
-    const I = @IntType(true, T.bit_count);
+    const I = std.meta.IntType(true, T.bit_count);
 
     var x = x_;
     if (math.isNan(x) or math.isInf(x)) {
lib/std/math/pow.zig
@@ -145,7 +145,7 @@ pub fn pow(comptime T: type, x: T, y: T) T {
     var xe = r2.exponent;
     var x1 = r2.significand;
 
-    var i = @floatToInt(@IntType(true, T.bit_count), yi);
+    var i = @floatToInt(std.meta.IntType(true, T.bit_count), yi);
     while (i != 0) : (i >>= 1) {
         const overflow_shift = math.floatExponentBits(T) + 1;
         if (xe < -(1 << overflow_shift) or (1 << overflow_shift) < xe) {
lib/std/math/sin.zig
@@ -45,7 +45,7 @@ const pi4c = 2.69515142907905952645E-15;
 const m4pi = 1.273239544735162542821171882678754627704620361328125;
 
 fn sin_(comptime T: type, x_: T) T {
-    const I = @IntType(true, T.bit_count);
+    const I = std.meta.IntType(true, T.bit_count);
 
     var x = x_;
     if (x == 0 or math.isNan(x)) {
lib/std/math/sqrt.zig
@@ -31,7 +31,7 @@ pub fn sqrt(x: var) Sqrt(@TypeOf(x)) {
     }
 }
 
-fn sqrt_int(comptime T: type, value: T) @IntType(false, T.bit_count / 2) {
+fn sqrt_int(comptime T: type, value: T) std.meta.IntType(false, T.bit_count / 2) {
     var op = value;
     var res: T = 0;
     var one: T = 1 << (T.bit_count - 2);
@@ -50,7 +50,7 @@ fn sqrt_int(comptime T: type, value: T) @IntType(false, T.bit_count / 2) {
         one >>= 2;
     }
 
-    const ResultType = @IntType(false, T.bit_count / 2);
+    const ResultType = std.meta.IntType(false, T.bit_count / 2);
     return @intCast(ResultType, res);
 }
 
@@ -66,7 +66,7 @@ test "math.sqrt_int" {
 /// Returns the return type `sqrt` will return given an operand of type `T`.
 pub fn Sqrt(comptime T: type) type {
     return switch (@typeInfo(T)) {
-        .Int => |int| @IntType(false, int.bits / 2),
+        .Int => |int| std.meta.IntType(false, int.bits / 2),
         else => T,
     };
 }
lib/std/math/tan.zig
@@ -38,7 +38,7 @@ const pi4c = 2.69515142907905952645E-15;
 const m4pi = 1.273239544735162542821171882678754627704620361328125;
 
 fn tan_(comptime T: type, x_: T) T {
-    const I = @IntType(true, T.bit_count);
+    const I = std.meta.IntType(true, T.bit_count);
 
     var x = x_;
     if (x == 0 or math.isNan(x)) {
lib/std/os/bits/linux.zig
@@ -1004,7 +1004,7 @@ pub const dl_phdr_info = extern struct {
 
 pub const CPU_SETSIZE = 128;
 pub const cpu_set_t = [CPU_SETSIZE / @sizeOf(usize)]usize;
-pub const cpu_count_t = @IntType(false, std.math.log2(CPU_SETSIZE * 8));
+pub const cpu_count_t = std.meta.IntType(false, std.math.log2(CPU_SETSIZE * 8));
 
 pub fn CPU_COUNT(set: cpu_set_t) cpu_count_t {
     var sum: cpu_count_t = 0;
lib/std/special/compiler_rt/addXf3.zig
@@ -54,21 +54,21 @@ pub fn __aeabi_dsub(a: f64, b: f64) callconv(.AAPCS) f64 {
 }
 
 // TODO: restore inline keyword, see: https://github.com/ziglang/zig/issues/2154
-fn normalize(comptime T: type, significand: *@IntType(false, T.bit_count)) i32 {
-    const Z = @IntType(false, T.bit_count);
-    const S = @IntType(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
+fn normalize(comptime T: type, significand: *std.meta.IntType(false, T.bit_count)) i32 {
+    const Z = std.meta.IntType(false, T.bit_count);
+    const S = std.meta.IntType(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
     const significandBits = std.math.floatMantissaBits(T);
     const implicitBit = @as(Z, 1) << significandBits;
 
-    const shift = @clz(@IntType(false, T.bit_count), significand.*) - @clz(Z, implicitBit);
+    const shift = @clz(std.meta.IntType(false, T.bit_count), significand.*) - @clz(Z, implicitBit);
     significand.* <<= @intCast(S, shift);
     return 1 - shift;
 }
 
 // TODO: restore inline keyword, see: https://github.com/ziglang/zig/issues/2154
 fn addXf3(comptime T: type, a: T, b: T) T {
-    const Z = @IntType(false, T.bit_count);
-    const S = @IntType(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
+    const Z = std.meta.IntType(false, T.bit_count);
+    const S = std.meta.IntType(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
 
     const typeWidth = T.bit_count;
     const significandBits = std.math.floatMantissaBits(T);
@@ -182,7 +182,7 @@ fn addXf3(comptime T: type, a: T, b: T) T {
         // If partial cancellation occured, we need to left-shift the result
         // and adjust the exponent:
         if (aSignificand < implicitBit << 3) {
-            const shift = @intCast(i32, @clz(Z, aSignificand)) - @intCast(i32, @clz(@IntType(false, T.bit_count), implicitBit << 3));
+            const shift = @intCast(i32, @clz(Z, aSignificand)) - @intCast(i32, @clz(std.meta.IntType(false, T.bit_count), implicitBit << 3));
             aSignificand <<= @intCast(S, shift);
             aExponent -= shift;
         }
lib/std/special/compiler_rt/compareXf2.zig
@@ -22,8 +22,8 @@ const GE = extern enum(i32) {
 pub fn cmp(comptime T: type, comptime RT: type, a: T, b: T) RT {
     @setRuntimeSafety(builtin.is_test);
 
-    const srep_t = @IntType(true, T.bit_count);
-    const rep_t = @IntType(false, T.bit_count);
+    const srep_t = std.meta.IntType(true, T.bit_count);
+    const rep_t = std.meta.IntType(false, T.bit_count);
 
     const significandBits = std.math.floatMantissaBits(T);
     const exponentBits = std.math.floatExponentBits(T);
@@ -68,7 +68,7 @@ pub fn cmp(comptime T: type, comptime RT: type, a: T, b: T) RT {
 pub fn unordcmp(comptime T: type, a: T, b: T) i32 {
     @setRuntimeSafety(builtin.is_test);
 
-    const rep_t = @IntType(false, T.bit_count);
+    const rep_t = std.meta.IntType(false, T.bit_count);
 
     const significandBits = std.math.floatMantissaBits(T);
     const exponentBits = std.math.floatExponentBits(T);
lib/std/special/compiler_rt/divdf3.zig
@@ -7,8 +7,8 @@ const builtin = @import("builtin");
 
 pub fn __divdf3(a: f64, b: f64) callconv(.C) f64 {
     @setRuntimeSafety(builtin.is_test);
-    const Z = @IntType(false, f64.bit_count);
-    const SignedZ = @IntType(true, f64.bit_count);
+    const Z = std.meta.IntType(false, f64.bit_count);
+    const SignedZ = std.meta.IntType(true, f64.bit_count);
 
     const typeWidth = f64.bit_count;
     const significandBits = std.math.floatMantissaBits(f64);
@@ -312,9 +312,9 @@ fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
     }
 }
 
-fn normalize(comptime T: type, significand: *@IntType(false, T.bit_count)) i32 {
+fn normalize(comptime T: type, significand: *std.meta.IntType(false, T.bit_count)) i32 {
     @setRuntimeSafety(builtin.is_test);
-    const Z = @IntType(false, T.bit_count);
+    const Z = std.meta.IntType(false, T.bit_count);
     const significandBits = std.math.floatMantissaBits(T);
     const implicitBit = @as(Z, 1) << significandBits;
 
lib/std/special/compiler_rt/divsf3.zig
@@ -7,7 +7,7 @@ const builtin = @import("builtin");
 
 pub fn __divsf3(a: f32, b: f32) callconv(.C) f32 {
     @setRuntimeSafety(builtin.is_test);
-    const Z = @IntType(false, f32.bit_count);
+    const Z = std.meta.IntType(false, f32.bit_count);
 
     const typeWidth = f32.bit_count;
     const significandBits = std.math.floatMantissaBits(f32);
@@ -185,9 +185,9 @@ pub fn __divsf3(a: f32, b: f32) callconv(.C) f32 {
     }
 }
 
-fn normalize(comptime T: type, significand: *@IntType(false, T.bit_count)) i32 {
+fn normalize(comptime T: type, significand: *std.meta.IntType(false, T.bit_count)) i32 {
     @setRuntimeSafety(builtin.is_test);
-    const Z = @IntType(false, T.bit_count);
+    const Z = std.meta.IntType(false, T.bit_count);
     const significandBits = std.math.floatMantissaBits(T);
     const implicitBit = @as(Z, 1) << significandBits;
 
lib/std/special/compiler_rt/extendXfYf2.zig
@@ -30,11 +30,11 @@ pub fn __aeabi_f2d(arg: f32) callconv(.AAPCS) f64 {
 
 const CHAR_BIT = 8;
 
-fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: @IntType(false, @typeInfo(src_t).Float.bits)) dst_t {
+fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: std.meta.IntType(false, @typeInfo(src_t).Float.bits)) dst_t {
     @setRuntimeSafety(builtin.is_test);
 
-    const src_rep_t = @IntType(false, @typeInfo(src_t).Float.bits);
-    const dst_rep_t = @IntType(false, @typeInfo(dst_t).Float.bits);
+    const src_rep_t = std.meta.IntType(false, @typeInfo(src_t).Float.bits);
+    const dst_rep_t = std.meta.IntType(false, @typeInfo(dst_t).Float.bits);
     const srcSigBits = std.math.floatMantissaBits(src_t);
     const dstSigBits = std.math.floatMantissaBits(dst_t);
     const SrcShift = std.math.Log2Int(src_rep_t);
lib/std/special/compiler_rt/fixint.zig
@@ -45,7 +45,7 @@ pub fn fixint(comptime fp_t: type, comptime fixint_t: type, a: fp_t) fixint_t {
     if (exponent < 0) return 0;
 
     // The unsigned result needs to be large enough to handle an fixint_t or rep_t
-    const fixuint_t = @IntType(false, fixint_t.bit_count);
+    const fixuint_t = std.meta.IntType(false, fixint_t.bit_count);
     const UintResultType = if (fixint_t.bit_count > rep_t.bit_count) fixuint_t else rep_t;
     var uint_result: UintResultType = undefined;
 
lib/std/special/compiler_rt/fixuint.zig
@@ -10,7 +10,7 @@ pub fn fixuint(comptime fp_t: type, comptime fixuint_t: type, a: fp_t) fixuint_t
         f128 => u128,
         else => unreachable,
     };
-    const srep_t = @IntType(true, rep_t.bit_count);
+    const srep_t = @import("std").meta.IntType(true, rep_t.bit_count);
     const significandBits = switch (fp_t) {
         f32 => 23,
         f64 => 52,
lib/std/special/compiler_rt/floatsiXf.zig
@@ -5,8 +5,8 @@ const maxInt = std.math.maxInt;
 fn floatsiXf(comptime T: type, a: i32) T {
     @setRuntimeSafety(builtin.is_test);
 
-    const Z = @IntType(false, T.bit_count);
-    const S = @IntType(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
+    const Z = std.meta.IntType(false, T.bit_count);
+    const S = std.meta.IntType(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
 
     if (a == 0) {
         return @as(T, 0.0);
lib/std/special/compiler_rt/mulXf3.zig
@@ -28,7 +28,7 @@ pub fn __aeabi_dmul(a: f64, b: f64) callconv(.C) f64 {
 
 fn mulXf3(comptime T: type, a: T, b: T) T {
     @setRuntimeSafety(builtin.is_test);
-    const Z = @IntType(false, T.bit_count);
+    const Z = std.meta.IntType(false, T.bit_count);
 
     const typeWidth = T.bit_count;
     const significandBits = std.math.floatMantissaBits(T);
@@ -264,9 +264,9 @@ fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
     }
 }
 
-fn normalize(comptime T: type, significand: *@IntType(false, T.bit_count)) i32 {
+fn normalize(comptime T: type, significand: *std.meta.IntType(false, T.bit_count)) i32 {
     @setRuntimeSafety(builtin.is_test);
-    const Z = @IntType(false, T.bit_count);
+    const Z = std.meta.IntType(false, T.bit_count);
     const significandBits = std.math.floatMantissaBits(T);
     const implicitBit = @as(Z, 1) << significandBits;
 
lib/std/special/compiler_rt/negXf2.zig
@@ -19,7 +19,7 @@ pub fn __aeabi_dneg(arg: f64) callconv(.AAPCS) f64 {
 }
 
 fn negXf2(comptime T: type, a: T) T {
-    const Z = @IntType(false, T.bit_count);
+    const Z = std.meta.IntType(false, T.bit_count);
 
     const typeWidth = T.bit_count;
     const significandBits = std.math.floatMantissaBits(T);
lib/std/special/compiler_rt/truncXfYf2.zig
@@ -36,8 +36,8 @@ pub fn __aeabi_f2h(a: f32) callconv(.AAPCS) u16 {
 }
 
 inline fn truncXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
-    const src_rep_t = @IntType(false, @typeInfo(src_t).Float.bits);
-    const dst_rep_t = @IntType(false, @typeInfo(dst_t).Float.bits);
+    const src_rep_t = std.meta.IntType(false, @typeInfo(src_t).Float.bits);
+    const dst_rep_t = std.meta.IntType(false, @typeInfo(dst_t).Float.bits);
     const srcSigBits = std.math.floatMantissaBits(src_t);
     const dstSigBits = std.math.floatMantissaBits(dst_t);
     const SrcShift = std.math.Log2Int(src_rep_t);
lib/std/special/compiler_rt/udivmod.zig
@@ -10,8 +10,8 @@ const high = 1 - low;
 pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: ?*DoubleInt) DoubleInt {
     @setRuntimeSafety(is_test);
 
-    const SingleInt = @IntType(false, @divExact(DoubleInt.bit_count, 2));
-    const SignedDoubleInt = @IntType(true, DoubleInt.bit_count);
+    const SingleInt = @import("std").meta.IntType(false, @divExact(DoubleInt.bit_count, 2));
+    const SignedDoubleInt = @import("std").meta.IntType(true, DoubleInt.bit_count);
     const Log2SingleInt = @import("std").math.Log2Int(SingleInt);
 
     const n = @ptrCast(*const [2]SingleInt, &a).*; // TODO issue #421
lib/std/special/c.zig
@@ -511,7 +511,7 @@ export fn roundf(a: f32) f32 {
 fn generic_fmod(comptime T: type, x: T, y: T) T {
     @setRuntimeSafety(false);
 
-    const uint = @IntType(false, T.bit_count);
+    const uint = std.meta.IntType(false, T.bit_count);
     const log2uint = math.Log2Int(uint);
     const digits = if (T == f32) 23 else 52;
     const exp_bits = if (T == f32) 9 else 12;
lib/std/child_process.zig
@@ -851,7 +851,7 @@ fn forkChildErrReport(fd: i32, err: ChildProcess.SpawnError) noreturn {
     os.exit(1);
 }
 
-const ErrInt = @IntType(false, @sizeOf(anyerror) * 8);
+const ErrInt = std.meta.IntType(false, @sizeOf(anyerror) * 8);
 
 fn writeIntFd(fd: i32, value: ErrInt) !void {
     const file = File{
lib/std/fmt.zig
@@ -82,7 +82,7 @@ pub fn format(
     comptime fmt: []const u8,
     args: var,
 ) Errors!void {
-    const ArgSetType = @IntType(false, 32);
+    const ArgSetType = u32;
     if (@typeInfo(@TypeOf(args)) != .Struct) {
         @compileError("Expected tuple or struct argument, found " ++ @typeName(@TypeOf(args)));
     }
@@ -944,7 +944,7 @@ fn formatIntSigned(
         .fill = options.fill,
     };
 
-    const uint = @IntType(false, @TypeOf(value).bit_count);
+    const uint = std.meta.IntType(false, @TypeOf(value).bit_count);
     if (value < 0) {
         const minus_sign: u8 = '-';
         try output(context, @as(*const [1]u8, &minus_sign)[0..]);
@@ -972,7 +972,7 @@ fn formatIntUnsigned(
     assert(base >= 2);
     var buf: [math.max(@TypeOf(value).bit_count, 1)]u8 = undefined;
     const min_int_bits = comptime math.max(@TypeOf(value).bit_count, @TypeOf(base).bit_count);
-    const MinInt = @IntType(@TypeOf(value).is_signed, min_int_bits);
+    const MinInt = std.meta.IntType(@TypeOf(value).is_signed, min_int_bits);
     var a: MinInt = value;
     var index: usize = buf.len;
 
lib/std/heap.zig
@@ -1015,7 +1015,7 @@ fn testAllocatorLargeAlignment(allocator: *mem.Allocator) mem.Allocator.Error!vo
     //  very near usize?
     if (mem.page_size << 2 > maxInt(usize)) return;
 
-    const USizeShift = @IntType(false, std.math.log2(usize.bit_count));
+    const USizeShift = std.meta.IntType(false, std.math.log2(usize.bit_count));
     const large_align = @as(u29, mem.page_size << 2);
 
     var align_mask: usize = undefined;
lib/std/io.zig
@@ -337,7 +337,7 @@ pub fn BitInStream(endian: builtin.Endian, comptime Error: type) type {
                 assert(u_bit_count >= bits);
                 break :bc if (u_bit_count <= u8_bit_count) u8_bit_count else u_bit_count;
             };
-            const Buf = @IntType(false, buf_bit_count);
+            const Buf = std.meta.IntType(false, buf_bit_count);
             const BufShift = math.Log2Int(Buf);
 
             out_bits.* = @as(usize, 0);
@@ -659,7 +659,7 @@ pub fn BitOutStream(endian: builtin.Endian, comptime Error: type) type {
                 assert(u_bit_count >= bits);
                 break :bc if (u_bit_count <= u8_bit_count) u8_bit_count else u_bit_count;
             };
-            const Buf = @IntType(false, buf_bit_count);
+            const Buf = std.meta.IntType(false, buf_bit_count);
             const BufShift = math.Log2Int(Buf);
 
             const buf_value = @intCast(Buf, value);
@@ -836,7 +836,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
             const u8_bit_count = 8;
             const t_bit_count = comptime meta.bitCount(T);
 
-            const U = @IntType(false, t_bit_count);
+            const U = std.meta.IntType(false, t_bit_count);
             const Log2U = math.Log2Int(U);
             const int_size = (U.bit_count + 7) / 8;
 
@@ -851,7 +851,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
 
             if (int_size == 1) {
                 if (t_bit_count == 8) return @bitCast(T, buffer[0]);
-                const PossiblySignedByte = @IntType(T.is_signed, 8);
+                const PossiblySignedByte = std.meta.IntType(T.is_signed, 8);
                 return @truncate(T, @bitCast(PossiblySignedByte, buffer[0]));
             }
 
@@ -1014,7 +1014,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co
             const t_bit_count = comptime meta.bitCount(T);
             const u8_bit_count = comptime meta.bitCount(u8);
 
-            const U = @IntType(false, t_bit_count);
+            const U = std.meta.IntType(false, t_bit_count);
             const Log2U = math.Log2Int(U);
             const int_size = (U.bit_count + 7) / 8;
 
lib/std/math.zig
@@ -444,7 +444,7 @@ pub fn Log2Int(comptime T: type) type {
         count += 1;
     }
 
-    return @IntType(false, count);
+    return std.meta.IntType(false, count);
 }
 
 pub fn IntFittingRange(comptime from: comptime_int, comptime to: comptime_int) type {
@@ -460,7 +460,7 @@ pub fn IntFittingRange(comptime from: comptime_int, comptime to: comptime_int) t
     if (is_signed) {
         magnitude_bits += 1;
     }
-    return @IntType(is_signed, magnitude_bits);
+    return std.meta.IntType(is_signed, magnitude_bits);
 }
 
 test "math.IntFittingRange" {
@@ -674,13 +674,13 @@ pub fn absCast(x: var) t: {
     if (@TypeOf(x) == comptime_int) {
         break :t comptime_int;
     } else {
-        break :t @IntType(false, @TypeOf(x).bit_count);
+        break :t std.meta.IntType(false, @TypeOf(x).bit_count);
     }
 } {
     if (@TypeOf(x) == comptime_int) {
         return if (x < 0) -x else x;
     }
-    const uint = @IntType(false, @TypeOf(x).bit_count);
+    const uint = std.meta.IntType(false, @TypeOf(x).bit_count);
     if (x >= 0) return @intCast(uint, x);
 
     return @intCast(uint, -(x + 1)) + 1;
@@ -701,10 +701,10 @@ test "math.absCast" {
 
 /// Returns the negation of the integer parameter.
 /// Result is a signed integer.
-pub fn negateCast(x: var) !@IntType(true, @TypeOf(x).bit_count) {
+pub fn negateCast(x: var) !std.meta.IntType(true, @TypeOf(x).bit_count) {
     if (@TypeOf(x).is_signed) return negate(x);
 
-    const int = @IntType(true, @TypeOf(x).bit_count);
+    const int = std.meta.IntType(true, @TypeOf(x).bit_count);
     if (x > -minInt(int)) return error.Overflow;
 
     if (x == -minInt(int)) return minInt(int);
@@ -790,11 +790,11 @@ fn testFloorPowerOfTwo() void {
 /// Returns the next power of two (if the value is not already a power of two).
 /// Only unsigned integers can be used. Zero is not an allowed input.
 /// Result is a type with 1 more bit than the input type.
-pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) @IntType(T.is_signed, T.bit_count + 1) {
+pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) std.meta.IntType(T.is_signed, T.bit_count + 1) {
     comptime assert(@typeInfo(T) == .Int);
     comptime assert(!T.is_signed);
     assert(value != 0);
-    comptime const PromotedType = @IntType(T.is_signed, T.bit_count + 1);
+    comptime const PromotedType = std.meta.IntType(T.is_signed, T.bit_count + 1);
     comptime const shiftType = std.math.Log2Int(PromotedType);
     return @as(PromotedType, 1) << @intCast(shiftType, T.bit_count - @clz(T, value - 1));
 }
@@ -805,7 +805,7 @@ pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) @IntType(T.is_signed, T
 pub fn ceilPowerOfTwo(comptime T: type, value: T) (error{Overflow}!T) {
     comptime assert(@typeInfo(T) == .Int);
     comptime assert(!T.is_signed);
-    comptime const PromotedType = @IntType(T.is_signed, T.bit_count + 1);
+    comptime const PromotedType = std.meta.IntType(T.is_signed, T.bit_count + 1);
     comptime const overflowBit = @as(PromotedType, 1) << T.bit_count;
     var x = ceilPowerOfTwoPromote(T, value);
     if (overflowBit & x != 0) {
@@ -947,8 +947,8 @@ test "max value type" {
     testing.expect(x == 2147483647);
 }
 
-pub fn mulWide(comptime T: type, a: T, b: T) @IntType(T.is_signed, T.bit_count * 2) {
-    const ResultInt = @IntType(T.is_signed, T.bit_count * 2);
+pub fn mulWide(comptime T: type, a: T, b: T) std.meta.IntType(T.is_signed, T.bit_count * 2) {
+    const ResultInt = std.meta.IntType(T.is_signed, T.bit_count * 2);
     return @as(ResultInt, a) * @as(ResultInt, b);
 }
 
lib/std/mem.zig
@@ -708,7 +708,7 @@ pub fn writeIntSliceLittle(comptime T: type, buffer: []u8, value: T) void {
     assert(buffer.len >= @divExact(T.bit_count, 8));
 
     // TODO I want to call writeIntLittle here but comptime eval facilities aren't good enough
-    const uint = @IntType(false, T.bit_count);
+    const uint = std.meta.IntType(false, T.bit_count);
     var bits = @truncate(uint, value);
     for (buffer) |*b| {
         b.* = @truncate(u8, bits);
@@ -725,7 +725,7 @@ pub fn writeIntSliceBig(comptime T: type, buffer: []u8, value: T) void {
     assert(buffer.len >= @divExact(T.bit_count, 8));
 
     // TODO I want to call writeIntBig here but comptime eval facilities aren't good enough
-    const uint = @IntType(false, T.bit_count);
+    const uint = std.meta.IntType(false, T.bit_count);
     var bits = @truncate(uint, value);
     var index: usize = buffer.len;
     while (index != 0) {
lib/std/meta.zig
@@ -580,3 +580,12 @@ pub fn declList(comptime Namespace: type, comptime Decl: type) []const *const De
         return &array;
     }
 }
+
+pub fn IntType(comptime is_signed: bool, comptime bit_count: u16) type {
+    return @Type(TypeInfo{
+        .Int = .{
+            .is_signed = is_signed,
+            .bits = bit_count,
+        },
+    });
+}
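A usage sketch for the new helper, mirroring the behavior test removed from test/stage1/behavior/misc.zig (not part of the commit):

    const std = @import("std");
    const expect = std.testing.expect;

    test "std.meta.IntType" {
        expect(std.meta.IntType(false, 8) == u8);
        expect(std.meta.IntType(true, 32) == i32);
        // Arbitrary widths work the same way @IntType did.
        expect(std.meta.IntType(false, 65) == u65);
    }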
lib/std/os.zig
@@ -3349,7 +3349,7 @@ pub fn res_mkquery(
     // Make a reasonably unpredictable id
     var ts: timespec = undefined;
     clock_gettime(CLOCK_REALTIME, &ts) catch {};
-    const UInt = @IntType(false, @TypeOf(ts.tv_nsec).bit_count);
+    const UInt = std.meta.IntType(false, @TypeOf(ts.tv_nsec).bit_count);
     const unsec = @bitCast(UInt, ts.tv_nsec);
     const id = @truncate(u32, unsec + unsec / 65536);
     q[0] = @truncate(u8, id / 256);
lib/std/packed_int_array.zig
@@ -34,13 +34,13 @@ pub fn PackedIntIo(comptime Int: type, comptime endian: builtin.Endian) type {
 
     //we bitcast the desired Int type to an unsigned version of itself
     // to avoid issues with shifting signed ints.
-    const UnInt = @IntType(false, int_bits);
+    const UnInt = std.meta.IntType(false, int_bits);
 
     //The maximum container int type
-    const MinIo = @IntType(false, min_io_bits);
+    const MinIo = std.meta.IntType(false, min_io_bits);
 
     //The minimum container int type
-    const MaxIo = @IntType(false, max_io_bits);
+    const MaxIo = std.meta.IntType(false, max_io_bits);
 
     return struct {
         pub fn get(bytes: []const u8, index: usize, bit_offset: u7) Int {
@@ -322,7 +322,7 @@ test "PackedIntArray" {
     inline while (bits <= 256) : (bits += 1) {
         //alternate unsigned and signed
         const even = bits % 2 == 0;
-        const I = @IntType(even, bits);
+        const I = std.meta.IntType(even, bits);
 
         const PackedArray = PackedIntArray(I, int_count);
         const expected_bytes = ((bits * int_count) + 7) / 8;
@@ -369,7 +369,7 @@ test "PackedIntSlice" {
     inline while (bits <= 256) : (bits += 1) {
         //alternate unsigned and signed
         const even = bits % 2 == 0;
-        const I = @IntType(even, bits);
+        const I = std.meta.IntType(even, bits);
         const P = PackedIntSlice(I);
 
         var data = P.init(&buffer, int_count);
@@ -399,7 +399,7 @@ test "PackedIntSlice of PackedInt(Array/Slice)" {
 
     comptime var bits = 0;
     inline while (bits <= max_bits) : (bits += 1) {
-        const Int = @IntType(false, bits);
+        const Int = std.meta.IntType(false, bits);
 
         const PackedArray = PackedIntArray(Int, int_count);
         var packed_array = @as(PackedArray, undefined);
lib/std/rand.zig
@@ -45,8 +45,8 @@ pub const Random = struct {
     /// Returns a random int `i` such that `0 <= i <= maxInt(T)`.
     /// `i` is evenly distributed.
     pub fn int(r: *Random, comptime T: type) T {
-        const UnsignedT = @IntType(false, T.bit_count);
-        const ByteAlignedT = @IntType(false, @divTrunc(T.bit_count + 7, 8) * 8);
+        const UnsignedT = std.meta.IntType(false, T.bit_count);
+        const ByteAlignedT = std.meta.IntType(false, @divTrunc(T.bit_count + 7, 8) * 8);
 
         var rand_bytes: [@sizeOf(ByteAlignedT)]u8 = undefined;
         r.bytes(rand_bytes[0..]);
@@ -85,9 +85,9 @@ pub const Random = struct {
         comptime assert(T.bit_count <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation!
         assert(0 < less_than);
         // Small is typically u32
-        const Small = @IntType(false, @divTrunc(T.bit_count + 31, 32) * 32);
+        const Small = std.meta.IntType(false, @divTrunc(T.bit_count + 31, 32) * 32);
         // Large is typically u64
-        const Large = @IntType(false, Small.bit_count * 2);
+        const Large = std.meta.IntType(false, Small.bit_count * 2);
 
         // adapted from:
         //   http://www.pcg-random.org/posts/bounded-rands.html
@@ -99,7 +99,7 @@ pub const Random = struct {
             // TODO: workaround for https://github.com/ziglang/zig/issues/1770
             // should be:
             //   var t: Small = -%less_than;
-            var t: Small = @bitCast(Small, -%@bitCast(@IntType(true, Small.bit_count), @as(Small, less_than)));
+            var t: Small = @bitCast(Small, -%@bitCast(std.meta.IntType(true, Small.bit_count), @as(Small, less_than)));
 
             if (t >= less_than) {
                 t -= less_than;
@@ -145,7 +145,7 @@ pub const Random = struct {
         assert(at_least < less_than);
         if (T.is_signed) {
             // Two's complement makes this math pretty easy.
-            const UnsignedT = @IntType(false, T.bit_count);
+            const UnsignedT = std.meta.IntType(false, T.bit_count);
             const lo = @bitCast(UnsignedT, at_least);
             const hi = @bitCast(UnsignedT, less_than);
             const result = lo +% r.uintLessThanBiased(UnsignedT, hi -% lo);
@@ -163,7 +163,7 @@ pub const Random = struct {
         assert(at_least < less_than);
         if (T.is_signed) {
             // Two's complement makes this math pretty easy.
-            const UnsignedT = @IntType(false, T.bit_count);
+            const UnsignedT = std.meta.IntType(false, T.bit_count);
             const lo = @bitCast(UnsignedT, at_least);
             const hi = @bitCast(UnsignedT, less_than);
             const result = lo +% r.uintLessThan(UnsignedT, hi -% lo);
@@ -180,7 +180,7 @@ pub const Random = struct {
         assert(at_least <= at_most);
         if (T.is_signed) {
             // Two's complement makes this math pretty easy.
-            const UnsignedT = @IntType(false, T.bit_count);
+            const UnsignedT = std.meta.IntType(false, T.bit_count);
             const lo = @bitCast(UnsignedT, at_least);
             const hi = @bitCast(UnsignedT, at_most);
             const result = lo +% r.uintAtMostBiased(UnsignedT, hi -% lo);
@@ -198,7 +198,7 @@ pub const Random = struct {
         assert(at_least <= at_most);
         if (T.is_signed) {
             // Two's complement makes this math pretty easy.
-            const UnsignedT = @IntType(false, T.bit_count);
+            const UnsignedT = std.meta.IntType(false, T.bit_count);
             const lo = @bitCast(UnsignedT, at_least);
             const hi = @bitCast(UnsignedT, at_most);
             const result = lo +% r.uintAtMost(UnsignedT, hi -% lo);
@@ -281,7 +281,7 @@ pub const Random = struct {
 /// This function introduces a minor bias.
 pub fn limitRangeBiased(comptime T: type, random_int: T, less_than: T) T {
     comptime assert(T.is_signed == false);
-    const T2 = @IntType(false, T.bit_count * 2);
+    const T2 = std.meta.IntType(false, T.bit_count * 2);
 
     // adapted from:
     //   http://www.pcg-random.org/posts/bounded-rands.html
lib/std/target.zig
@@ -222,7 +222,7 @@ pub const Target = union(enum) {
                 pub const needed_bit_count = 154;
                 pub const byte_count = (needed_bit_count + 7) / 8;
                 pub const usize_count = (byte_count + (@sizeOf(usize) - 1)) / @sizeOf(usize);
-                pub const Index = std.math.Log2Int(@IntType(false, usize_count * @bitSizeOf(usize)));
+                pub const Index = std.math.Log2Int(std.meta.IntType(false, usize_count * @bitSizeOf(usize)));
                 pub const ShiftInt = std.math.Log2Int(usize);
 
                 pub const empty = Set{ .ints = [1]usize{0} ** usize_count };
lib/std/thread.zig
@@ -148,7 +148,7 @@ pub const Thread = struct {
         const default_stack_size = 16 * 1024 * 1024;
 
         const Context = @TypeOf(context);
-        comptime assert(@ArgType(@TypeOf(startFn), 0) == Context);
+        comptime assert(@typeInfo(@TypeOf(startFn)).Fn.args[0].arg_type.? == Context);
 
         if (builtin.os == builtin.Os.windows) {
             const WinThread = struct {
src-self-hosted/type.zig
@@ -1042,7 +1042,7 @@ fn hashAny(x: var, comptime seed: u64) u32 {
     switch (@typeInfo(@TypeOf(x))) {
         .Int => |info| {
             comptime var rng = comptime std.rand.DefaultPrng.init(seed);
-            const unsigned_x = @bitCast(@IntType(false, info.bits), x);
+            const unsigned_x = @bitCast(std.meta.IntType(false, info.bits), x);
             if (info.bits <= 32) {
                 return @as(u32, unsigned_x) *% comptime rng.random.scalar(u32);
             } else {
test/stage1/behavior/bit_shifting.zig
@@ -2,9 +2,9 @@ const std = @import("std");
 const expect = std.testing.expect;
 
 fn ShardedTable(comptime Key: type, comptime mask_bit_count: comptime_int, comptime V: type) type {
-    expect(Key == @IntType(false, Key.bit_count));
+    expect(Key == std.meta.IntType(false, Key.bit_count));
     expect(Key.bit_count >= mask_bit_count);
-    const ShardKey = @IntType(false, mask_bit_count);
+    const ShardKey = std.meta.IntType(false, mask_bit_count);
     const shift_amount = Key.bit_count - ShardKey.bit_count;
     return struct {
         const Self = @This();
test/stage1/behavior/math.zig
@@ -270,7 +270,7 @@ fn testBinaryNot(x: u16) void {
 }
 
 test "small int addition" {
-    var x: @IntType(false, 2) = 0;
+    var x: u2 = 0;
     expect(x == 0);
 
     x += 1;
test/stage1/behavior/misc.zig
@@ -24,35 +24,6 @@ test "call disabled extern fn" {
     disabledExternFn();
 }
 
-test "@IntType builtin" {
-    expect(@IntType(true, 8) == i8);
-    expect(@IntType(true, 16) == i16);
-    expect(@IntType(true, 32) == i32);
-    expect(@IntType(true, 64) == i64);
-
-    expect(@IntType(false, 8) == u8);
-    expect(@IntType(false, 16) == u16);
-    expect(@IntType(false, 32) == u32);
-    expect(@IntType(false, 64) == u64);
-
-    expect(i8.bit_count == 8);
-    expect(i16.bit_count == 16);
-    expect(i32.bit_count == 32);
-    expect(i64.bit_count == 64);
-
-    expect(i8.is_signed);
-    expect(i16.is_signed);
-    expect(i32.is_signed);
-    expect(i64.is_signed);
-    expect(isize.is_signed);
-
-    expect(!u8.is_signed);
-    expect(!u16.is_signed);
-    expect(!u32.is_signed);
-    expect(!u64.is_signed);
-    expect(!usize.is_signed);
-}
-
 test "floating point primitive bit counts" {
     expect(f16.bit_count == 16);
     expect(f32.bit_count == 32);
test/stage1/behavior/reflection.zig
@@ -16,9 +16,9 @@ test "reflection: function return type, var args, and param types" {
         expect(@TypeOf(dummy).ReturnType == i32);
         expect(!@TypeOf(dummy).is_var_args);
         expect(@TypeOf(dummy).arg_count == 3);
-        expect(@ArgType(@TypeOf(dummy), 0) == bool);
-        expect(@ArgType(@TypeOf(dummy), 1) == i32);
-        expect(@ArgType(@TypeOf(dummy), 2) == f32);
+        expect(@typeInfo(@TypeOf(dummy)).Fn.args[0].arg_type.? == bool);
+        expect(@typeInfo(@TypeOf(dummy)).Fn.args[1].arg_type.? == i32);
+        expect(@typeInfo(@TypeOf(dummy)).Fn.args[2].arg_type.? == f32);
     }
 }
 
test/compile_errors.zig
@@ -1657,7 +1657,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
         \\    var ptr: [*c]u8 = (1 << 64) + 1;
         \\}
         \\export fn b() void {
-        \\    var x: @IntType(false, 65) = 0x1234;
+        \\    var x: u65 = 0x1234;
         \\    var ptr: [*c]u8 = x;
         \\}
     , &[_][]const u8{
@@ -1896,7 +1896,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
 
     cases.add("exceeded maximum bit width of integer",
         \\export fn entry1() void {
-        \\    const T = @IntType(false, 65536);
+        \\    const T = u65536;
         \\}
         \\export fn entry2() void {
         \\    var x: i65536 = 1;
@@ -5598,7 +5598,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
     });
 
     cases.add("globally shadowing a primitive type",
-        \\const u16 = @intType(false, 8);
+        \\const u16 = u8;
         \\export fn entry() void {
         \\    const a: u16 = 300;
         \\}
@@ -5939,23 +5939,6 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
         "tmp.zig:2:1: error: invalid character: '\\t'",
     });
 
-    cases.add("@ArgType given non function parameter",
-        \\comptime {
-        \\    _ = @ArgType(i32, 3);
-        \\}
-    , &[_][]const u8{
-        "tmp.zig:2:18: error: expected function, found 'i32'",
-    });
-
-    cases.add("@ArgType arg index out of bounds",
-        \\comptime {
-        \\    _ = @ArgType(@TypeOf(add), 2);
-        \\}
-        \\fn add(a: i32, b: i32) i32 { return a + b; }
-    , &[_][]const u8{
-        "tmp.zig:2:32: error: arg index 2 out of bounds; 'fn(i32, i32) i32' has 2 arguments",
-    });
-
     cases.add("calling var args extern function, passing array instead of pointer",
         \\export fn entry() void {
         \\    foo("hello".*,);
@@ -6379,15 +6362,6 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
         "tmp.zig:3:25: error: ReturnType has not been resolved because 'fn(var) var' is generic",
     });
 
-    cases.add("getting @ArgType of generic function",
-        \\fn generic(a: var) void {}
-        \\comptime {
-        \\    _ = @ArgType(@TypeOf(generic), 0);
-        \\}
-    , &[_][]const u8{
-        "tmp.zig:3:36: error: @ArgType could not resolve the type of arg 0 because 'fn(var) var' is generic",
-    });
-
     cases.add("unsupported modifier at start of asm output constraint",
         \\export fn foo() void {
         \\    var bar: u32 = 3;