Commit 3e9ab6aa7b
Changed files (2)
src
arch
wasm
test
behavior
src/arch/wasm/CodeGen.zig
@@ -918,8 +918,11 @@ fn addLabel(func: *CodeGen, tag: Mir.Inst.Tag, label: u32) error{OutOfMemory}!vo
try func.addInst(.{ .tag = tag, .data = .{ .label = label } });
}
-fn addImm32(func: *CodeGen, imm: i32) error{OutOfMemory}!void {
- try func.addInst(.{ .tag = .i32_const, .data = .{ .imm32 = imm } });
+/// Accepts an unsigned 32bit integer rather than a signed integer to
+/// prevent us from having to bitcast multiple times as most values
+/// within codegen are represented as unsigned rather than signed.
+fn addImm32(func: *CodeGen, imm: u32) error{OutOfMemory}!void {
+ try func.addInst(.{ .tag = .i32_const, .data = .{ .imm32 = @bitCast(imm) } });
}
/// Accepts an unsigned 64bit integer rather than a signed integer to
@@ -1049,7 +1052,7 @@ fn emitWValue(func: *CodeGen, value: WValue) InnerError!void {
.dead => unreachable, // reference to free'd `WValue` (missing reuseOperand?)
.none, .stack => {}, // no-op
.local => |idx| try func.addLabel(.local_get, idx.value),
- .imm32 => |val| try func.addImm32(@as(i32, @bitCast(val))),
+ .imm32 => |val| try func.addImm32(val),
.imm64 => |val| try func.addImm64(val),
.imm128 => |val| try func.addImm128(val),
.float32 => |val| try func.addInst(.{ .tag = .f32_const, .data = .{ .float32 = val } }),
@@ -1467,7 +1470,7 @@ fn lowerToStack(func: *CodeGen, value: WValue) !void {
if (offset.value > 0) {
switch (func.arch()) {
.wasm32 => {
- try func.addImm32(@as(i32, @bitCast(offset.value)));
+ try func.addImm32(offset.value);
try func.addTag(.i32_add);
},
.wasm64 => {
@@ -1809,7 +1812,7 @@ fn buildPointerOffset(func: *CodeGen, ptr_value: WValue, offset: u64, action: en
if (offset + ptr_value.offset() > 0) {
switch (func.arch()) {
.wasm32 => {
- try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(offset + ptr_value.offset())))));
+ try func.addImm32(@intCast(offset + ptr_value.offset()));
try func.addTag(.i32_add);
},
.wasm64 => {
@@ -2794,11 +2797,9 @@ fn airAbs(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
return func.fail("TODO: airAbs for signed integers larger than '{d}' bits", .{int_bits});
};
- const op = try operand.toLocal(func, ty);
-
- try func.emitWValue(op);
switch (wasm_bits) {
32 => {
+ try func.emitWValue(operand);
if (wasm_bits != int_bits) {
try func.addImm32(wasm_bits - int_bits);
try func.addTag(.i32_shl);
@@ -2806,20 +2807,19 @@ fn airAbs(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addImm32(31);
try func.addTag(.i32_shr_s);
- const tmp = try func.allocLocal(ty);
+ var tmp = try func.allocLocal(ty);
+ defer tmp.free(func);
try func.addLabel(.local_tee, tmp.local.value);
- try func.emitWValue(op);
+ try func.emitWValue(operand);
try func.addTag(.i32_xor);
try func.emitWValue(tmp);
try func.addTag(.i32_sub);
- if (int_bits != wasm_bits) {
- try func.emitWValue(WValue{ .imm32 = (@as(u32, 1) << @intCast(int_bits)) - 1 });
- try func.addTag(.i32_and);
- }
+ _ = try func.wrapOperand(.stack, ty);
},
64 => {
+ try func.emitWValue(operand);
if (wasm_bits != int_bits) {
try func.addImm64(wasm_bits - int_bits);
try func.addTag(.i64_shl);
@@ -2827,20 +2827,45 @@ fn airAbs(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.addImm64(63);
try func.addTag(.i64_shr_s);
- const tmp = try func.allocLocal(ty);
+ var tmp = try func.allocLocal(ty);
+ defer tmp.free(func);
try func.addLabel(.local_tee, tmp.local.value);
- try func.emitWValue(op);
+ try func.emitWValue(operand);
try func.addTag(.i64_xor);
try func.emitWValue(tmp);
try func.addTag(.i64_sub);
- if (int_bits != wasm_bits) {
- try func.emitWValue(WValue{ .imm64 = (@as(u64, 1) << @intCast(int_bits)) - 1 });
- try func.addTag(.i64_and);
+ _ = try func.wrapOperand(.stack, ty);
+ },
+ 128 => {
+ const mask = try func.allocStack(Type.u128);
+ try func.emitWValue(mask);
+ try func.emitWValue(mask);
+
+ _ = try func.load(operand, Type.u64, 8);
+ if (int_bits != 128) {
+ try func.addImm64(128 - int_bits);
+ try func.addTag(.i64_shl);
}
+ try func.addImm64(63);
+ try func.addTag(.i64_shr_s);
+
+ var tmp = try func.allocLocal(Type.u64);
+ defer tmp.free(func);
+ try func.addLabel(.local_tee, tmp.local.value);
+ try func.store(.stack, .stack, Type.u64, mask.offset() + 0);
+ try func.emitWValue(tmp);
+ try func.store(.stack, .stack, Type.u64, mask.offset() + 8);
+
+ const a = try func.binOpBigInt(operand, mask, Type.u128, .xor);
+ const b = try func.binOpBigInt(a, mask, Type.u128, .sub);
+ const result = try func.wrapOperand(b, ty);
+
+ func.finishAir(inst, result, &.{ty_op.operand});
+ return;
},
- else => return func.fail("TODO: Implement airAbs for {}", .{ty.fmt(mod)}),
+ else => unreachable,
}
const result = try (WValue{ .stack = {} }).toLocal(func, ty);
@@ -2932,7 +2957,7 @@ fn floatNeg(func: *CodeGen, ty: Type, arg: WValue) InnerError!WValue {
switch (float_bits) {
16 => {
try func.emitWValue(arg);
- try func.addImm32(std.math.minInt(i16));
+ try func.addImm32(0x8000);
try func.addTag(.i32_xor);
return .stack;
},
@@ -3019,44 +3044,48 @@ fn wrapBinOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerEr
/// Wraps an operand based on a given type's bitsize.
/// Asserts `Type` is <= 128 bits.
-/// NOTE: When the Type is <= 64 bits, leaves the value on top of the stack.
+/// NOTE: When the Type is <= 64 bits, leaves the value on top of the stack, if wrapping was needed.
fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
const mod = func.bin_file.base.comp.module.?;
assert(ty.abiSize(mod) <= 16);
- const bitsize = @as(u16, @intCast(ty.bitSize(mod)));
- const wasm_bits = toWasmBits(bitsize) orelse {
- return func.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{bitsize});
+ const int_bits = @as(u16, @intCast(ty.bitSize(mod))); // TODO use ty.intInfo(mod).bits
+ const wasm_bits = toWasmBits(int_bits) orelse {
+ return func.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{int_bits});
};
- if (wasm_bits == bitsize) return operand;
+ if (wasm_bits == int_bits) return operand;
- if (wasm_bits == 128) {
- assert(operand != .stack);
- const lsb = try func.load(operand, Type.u64, 8);
+ switch (wasm_bits) {
+ 32 => {
+ try func.emitWValue(operand);
+ try func.addImm32((@as(u32, 1) << @intCast(int_bits)) - 1);
+ try func.addTag(.i32_and);
+ return .stack;
+ },
+ 64 => {
+ try func.emitWValue(operand);
+ try func.addImm64((@as(u64, 1) << @intCast(int_bits)) - 1);
+ try func.addTag(.i64_and);
+ return .stack;
+ },
+ 128 => {
+ assert(operand != .stack);
+ const result = try func.allocStack(ty);
- const result_ptr = try func.allocStack(ty);
- try func.emitWValue(result_ptr);
- try func.store(.{ .stack = {} }, lsb, Type.u64, 8 + result_ptr.offset());
- const result = (@as(u64, 1) << @as(u6, @intCast(64 - (wasm_bits - bitsize)))) - 1;
- try func.emitWValue(result_ptr);
- _ = try func.load(operand, Type.u64, 0);
- try func.addImm64(result);
- try func.addTag(.i64_and);
- try func.addMemArg(.i64_store, .{ .offset = result_ptr.offset(), .alignment = 8 });
- return result_ptr;
- }
+ try func.emitWValue(result);
+ _ = try func.load(operand, Type.u64, 0);
+ try func.store(.stack, .stack, Type.u64, result.offset());
- const result = (@as(u64, 1) << @as(u6, @intCast(bitsize))) - 1;
- try func.emitWValue(operand);
- if (bitsize <= 32) {
- try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(result)))));
- try func.addTag(.i32_and);
- } else if (bitsize <= 64) {
- try func.addImm64(result);
- try func.addTag(.i64_and);
- } else unreachable;
+ try func.emitWValue(result);
+ _ = try func.load(operand, Type.u64, 8);
+ try func.addImm64((@as(u64, 1) << @intCast(int_bits - 64)) - 1);
+ try func.addTag(.i64_and);
+ try func.store(.stack, .stack, Type.u64, result.offset() + 8);
- return WValue{ .stack = {} };
+ return result;
+ },
+ else => unreachable,
+ }
}
fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerError!WValue {
@@ -4062,11 +4091,11 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if (lowest < 0) {
// since br_table works using indexes, starting from '0', we must ensure all values
// we put inside are at least 0.
- try func.addImm32(lowest * -1);
+ try func.addImm32(@bitCast(lowest * -1));
try func.addTag(.i32_add);
} else if (lowest > 0) {
// make the index start from 0 by substracting the lowest value
- try func.addImm32(lowest);
+ try func.addImm32(@bitCast(lowest));
try func.addTag(.i32_sub);
}
@@ -4588,7 +4617,7 @@ fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// calculate index into slice
try func.emitWValue(index);
- try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size)))));
+ try func.addImm32(@intCast(elem_size));
try func.addTag(.i32_mul);
try func.addTag(.i32_add);
@@ -4618,7 +4647,7 @@ fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// calculate index into slice
try func.emitWValue(index);
- try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size)))));
+ try func.addImm32(@intCast(elem_size));
try func.addTag(.i32_mul);
try func.addTag(.i32_add);
@@ -4737,7 +4766,7 @@ fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// calculate index into slice
try func.emitWValue(index);
- try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size)))));
+ try func.addImm32(@intCast(elem_size));
try func.addTag(.i32_mul);
try func.addTag(.i32_add);
@@ -4776,7 +4805,7 @@ fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// calculate index into ptr
try func.emitWValue(index);
- try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size)))));
+ try func.addImm32(@intCast(elem_size));
try func.addTag(.i32_mul);
try func.addTag(.i32_add);
@@ -4804,7 +4833,7 @@ fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
try func.lowerToStack(ptr);
try func.emitWValue(offset);
- try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(pointee_ty.abiSize(mod))))));
+ try func.addImm32(@intCast(pointee_ty.abiSize(mod)));
try func.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode));
try func.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode));
@@ -4948,7 +4977,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
if (isByRef(array_ty, mod)) {
try func.lowerToStack(array);
try func.emitWValue(index);
- try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size)))));
+ try func.addImm32(@intCast(elem_size));
try func.addTag(.i32_mul);
try func.addTag(.i32_add);
} else {
@@ -4981,7 +5010,7 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// Is a non-unrolled vector (v128)
try func.lowerToStack(stack_vec);
try func.emitWValue(index);
- try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(elem_size)))));
+ try func.addImm32(@intCast(elem_size));
try func.addTag(.i32_mul);
try func.addTag(.i32_add);
},
@@ -5717,7 +5746,7 @@ fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result = if (field_offset != 0) result: {
const base = try func.buildPointerOffset(field_ptr, 0, .new);
try func.addLabel(.local_get, base.local.value);
- try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(field_offset)))));
+ try func.addImm32(@intCast(field_offset));
try func.addTag(.i32_sub);
try func.addLabel(.local_set, base.local.value);
break :result base;
@@ -5938,7 +5967,7 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(operand);
switch (func.arch()) {
.wasm32 => {
- try func.addImm32(@as(i32, @bitCast(@as(u32, @intCast(abi_size)))));
+ try func.addImm32(@intCast(abi_size));
try func.addTag(.i32_mul);
try func.addTag(.i32_add);
},
@@ -7078,7 +7107,7 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
if (wasm_bits != int_info.bits and op == .add) {
const val: u64 = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(int_info.bits))) - 1));
const imm_val = switch (wasm_bits) {
- 32 => WValue{ .imm32 = @as(u32, @intCast(val)) },
+ 32 => WValue{ .imm32 = @intCast(val) },
64 => WValue{ .imm64 = val },
else => unreachable,
};
@@ -7088,8 +7117,8 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
_ = try func.cmp(bin_result, imm_val, ty, .lt);
} else {
switch (wasm_bits) {
- 32 => try func.addImm32(if (op == .add) @as(i32, -1) else 0),
- 64 => try func.addImm64(if (op == .add) @as(u64, @bitCast(@as(i64, -1))) else 0),
+ 32 => try func.addImm32(if (op == .add) std.math.maxInt(u32) else 0),
+ 64 => try func.addImm64(if (op == .add) std.math.maxInt(u64) else 0),
else => unreachable,
}
try func.emitWValue(bin_result);
@@ -7192,21 +7221,21 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
switch (wasm_bits) {
32 => blk: {
if (!is_signed) {
- try func.addImm32(-1);
+ try func.addImm32(std.math.maxInt(u32));
break :blk;
}
- try func.addImm32(std.math.minInt(i32));
- try func.addImm32(std.math.maxInt(i32));
+ try func.addImm32(@bitCast(@as(i32, std.math.minInt(i32))));
+ try func.addImm32(@bitCast(@as(i32, std.math.maxInt(i32))));
_ = try func.cmp(lhs, .{ .imm32 = 0 }, ty, .lt);
try func.addTag(.select);
},
64 => blk: {
if (!is_signed) {
- try func.addImm64(@as(u64, @bitCast(@as(i64, -1))));
+ try func.addImm64(std.math.maxInt(u64));
break :blk;
}
- try func.addImm64(@as(u64, @bitCast(@as(i64, std.math.minInt(i64)))));
- try func.addImm64(@as(u64, @bitCast(@as(i64, std.math.maxInt(i64)))));
+ try func.addImm64(@bitCast(@as(i64, std.math.minInt(i64))));
+ try func.addImm64(@bitCast(@as(i64, std.math.maxInt(i64))));
_ = try func.cmp(lhs, .{ .imm64 = 0 }, ty, .lt);
try func.addTag(.select);
},
@@ -7236,23 +7265,23 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
switch (wasm_bits) {
32 => blk: {
if (!is_signed) {
- try func.addImm32(-1);
+ try func.addImm32(std.math.maxInt(u32));
break :blk;
}
- try func.addImm32(std.math.minInt(i32));
- try func.addImm32(std.math.maxInt(i32));
+ try func.addImm32(@bitCast(@as(i32, std.math.minInt(i32))));
+ try func.addImm32(@bitCast(@as(i32, std.math.maxInt(i32))));
_ = try func.cmp(shl_res, .{ .imm32 = 0 }, ext_ty, .lt);
try func.addTag(.select);
},
64 => blk: {
if (!is_signed) {
- try func.addImm64(@as(u64, @bitCast(@as(i64, -1))));
+ try func.addImm64(std.math.maxInt(u64));
break :blk;
}
- try func.addImm64(@as(u64, @bitCast(@as(i64, std.math.minInt(i64)))));
- try func.addImm64(@as(u64, @bitCast(@as(i64, std.math.maxInt(i64)))));
+ try func.addImm64(@bitCast(@as(i64, std.math.minInt(i64))));
+ try func.addImm64(@bitCast(@as(i64, std.math.maxInt(i64))));
_ = try func.cmp(shl_res, .{ .imm64 = 0 }, ext_ty, .lt);
try func.addTag(.select);
},
@@ -7546,7 +7575,7 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
// lower operand to determine jump table target
try func.emitWValue(operand);
- try func.addImm32(@as(i32, @intCast(lowest.?)));
+ try func.addImm32(lowest.?);
try func.addTag(.i32_sub);
// Account for default branch so always add '1'
@@ -7641,7 +7670,7 @@ fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const result_ptr = if (isByRef(result_ty, mod)) val: {
try func.emitWValue(cmp_result);
- try func.addImm32(-1);
+ try func.addImm32(~@as(u32, 0));
try func.addTag(.i32_xor);
try func.addImm32(1);
try func.addTag(.i32_and);
@@ -7717,9 +7746,9 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const and_res = try func.binOp(value, operand, ty, .@"and");
if (wasm_bits == 32)
- try func.addImm32(-1)
+ try func.addImm32(~@as(u32, 0))
else if (wasm_bits == 64)
- try func.addImm64(@as(u64, @bitCast(@as(i64, -1))))
+ try func.addImm64(~@as(u64, 0))
else
return func.fail("TODO: `@atomicRmw` with operator `Nand` for types larger than 64 bits", .{});
_ = try func.binOp(and_res, .stack, ty, .xor);
@@ -7849,9 +7878,9 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
try func.emitWValue(ptr);
const and_res = try func.binOp(result, operand, ty, .@"and");
if (wasm_bits == 32)
- try func.addImm32(-1)
+ try func.addImm32(~@as(u32, 0))
else if (wasm_bits == 64)
- try func.addImm64(@as(u64, @bitCast(@as(i64, -1))))
+ try func.addImm64(~@as(u64, 0))
else
return func.fail("TODO: `@atomicRmw` with operator `Nand` for types larger than 64 bits", .{});
_ = try func.binOp(and_res, .stack, ty, .xor);
test/behavior/abs.zig
@@ -87,6 +87,70 @@ fn testAbsUnsignedIntegers() !void {
}
}
+test "@abs big int <= 128 bits" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
+
+ try comptime testAbsSignedBigInt();
+ try testAbsSignedBigInt();
+
+ try comptime testAbsUnsignedBigInt();
+ try testAbsUnsignedBigInt();
+}
+
+fn abs(comptime T: type, a: T) std.meta.Int(.unsigned, @typeInfo(T).Int.bits) {
+ return @abs(a);
+}
+
+fn testAbsSignedBigInt() !void {
+ try expect(abs(i65, -18446744073709551616) == 18446744073709551616);
+ try expect(abs(i65, 18446744073709551615) == 18446744073709551615);
+ try expect(abs(i65, 1234) == 1234);
+ try expect(abs(i65, -1234) == 1234);
+
+ try expect(abs(i84, -9671406556917033397649408) == 9671406556917033397649408);
+ try expect(abs(i84, 9671406556917033397649407) == 9671406556917033397649407);
+ try expect(abs(i84, 1234) == 1234);
+ try expect(abs(i84, -1234) == 1234);
+
+ try expect(abs(i96, -39614081257132168796771975168) == 39614081257132168796771975168);
+ try expect(abs(i96, 39614081257132168796771975167) == 39614081257132168796771975167);
+ try expect(abs(i96, 1234) == 1234);
+ try expect(abs(i96, -1234) == 1234);
+
+ try expect(abs(i105, -20282409603651670423947251286016) == 20282409603651670423947251286016);
+ try expect(abs(i105, 20282409603651670423947251286015) == 20282409603651670423947251286015);
+ try expect(abs(i105, 1234) == 1234);
+ try expect(abs(i105, -1234) == 1234);
+
+ try expect(abs(i128, -170141183460469231731687303715884105728) == 170141183460469231731687303715884105728);
+ try expect(abs(i128, 170141183460469231731687303715884105727) == 170141183460469231731687303715884105727);
+ try expect(abs(i128, 1234) == 1234);
+ try expect(abs(i128, -1234) == 1234);
+}
+
+fn testAbsUnsignedBigInt() !void {
+ try expect(abs(u65, 36893488147419103231) == 36893488147419103231);
+ try expect(abs(u65, 1234) == 1234);
+
+ try expect(abs(u84, 19342813113834066795298815) == 19342813113834066795298815);
+ try expect(abs(u84, 1234) == 1234);
+
+ try expect(abs(u96, 79228162514264337593543950335) == 79228162514264337593543950335);
+ try expect(abs(u96, 1234) == 1234);
+
+ try expect(abs(u105, 40564819207303340847894502572031) == 40564819207303340847894502572031);
+ try expect(abs(u105, 1234) == 1234);
+
+ try expect(abs(u128, 340282366920938463463374607431768211455) == 340282366920938463463374607431768211455);
+ try expect(abs(u128, 1234) == 1234);
+}
+
test "@abs floats" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO