Commit 05afb3f358

Andrew Kelley <andrew@ziglang.org>
2022-11-24 04:49:13
Merge pull request #13632 from ziglang/cbe
1 parent 703552e
Changed files (5)
lib
src
test
behavior
lib/zig.h
@@ -344,6 +344,12 @@ static inline zig_bool zig_addo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_
 #endif
 }
 
+/* Vector form of zig_addo_u32: per-lane add over n lanes; ov[i] receives the
+ * scalar helper's overflow flag for lane i. */
+static inline void zig_vaddo_u32(zig_u8 *ov, zig_u32 *res, int n,
+    const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits)
+{
+    for (int i = 0; i < n; ++i) ov[i] = zig_addo_u32(&res[i], lhs[i], rhs[i], bits);
+}
+
 zig_extern zig_i32  __addosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow);
 static inline zig_bool zig_addo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
 #if zig_has_builtin(add_overflow)
@@ -358,6 +364,12 @@ static inline zig_bool zig_addo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_
     return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits);
 }
 
+/* Vector form of zig_addo_i32: per-lane add over n lanes; ov[i] receives the
+ * scalar helper's overflow flag for lane i. */
+static inline void zig_vaddo_i32(zig_u8 *ov, zig_i32 *res, int n,
+    const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits)
+{
+    for (int i = 0; i < n; ++i) ov[i] = zig_addo_i32(&res[i], lhs[i], rhs[i], bits);
+}
+
 static inline zig_bool zig_addo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
 #if zig_has_builtin(add_overflow)
     zig_u64 full_res;
@@ -370,6 +382,12 @@ static inline zig_bool zig_addo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_
 #endif
 }
 
+/* Vector form of zig_addo_u64: per-lane add over n lanes; ov[i] receives the
+ * scalar helper's overflow flag for lane i. */
+static inline void zig_vaddo_u64(zig_u8 *ov, zig_u64 *res, int n,
+    const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits)
+{
+    for (int i = 0; i < n; ++i) ov[i] = zig_addo_u64(&res[i], lhs[i], rhs[i], bits);
+}
+
 zig_extern zig_i64  __addodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow);
 static inline zig_bool zig_addo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
 #if zig_has_builtin(add_overflow)
@@ -384,6 +402,12 @@ static inline zig_bool zig_addo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_
     return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits);
 }
 
+/* Vector form of zig_addo_i64: per-lane add over n lanes; ov[i] receives the
+ * scalar helper's overflow flag for lane i. */
+static inline void zig_vaddo_i64(zig_u8 *ov, zig_i64 *res, int n,
+    const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits)
+{
+    for (int i = 0; i < n; ++i) ov[i] = zig_addo_i64(&res[i], lhs[i], rhs[i], bits);
+}
+
 static inline zig_bool zig_addo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
 #if zig_has_builtin(add_overflow)
     zig_u8 full_res;
@@ -395,6 +419,12 @@ static inline zig_bool zig_addo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 b
 #endif
 }
 
+/* Vector form of zig_addo_u8: per-lane add over n lanes; ov[i] receives the
+ * scalar helper's overflow flag for lane i. */
+static inline void zig_vaddo_u8(zig_u8 *ov, zig_u8 *res, int n,
+    const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits)
+{
+    for (int i = 0; i < n; ++i) ov[i] = zig_addo_u8(&res[i], lhs[i], rhs[i], bits);
+}
+
 static inline zig_bool zig_addo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
 #if zig_has_builtin(add_overflow)
     zig_i8 full_res;
@@ -406,6 +436,12 @@ static inline zig_bool zig_addo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 b
 #endif
 }
 
+/* Vector form of zig_addo_i8: per-lane add over n lanes; ov[i] receives the
+ * scalar helper's overflow flag for lane i. */
+static inline void zig_vaddo_i8(zig_u8 *ov, zig_i8 *res, int n,
+    const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits)
+{
+    for (int i = 0; i < n; ++i) ov[i] = zig_addo_i8(&res[i], lhs[i], rhs[i], bits);
+}
+
 static inline zig_bool zig_addo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
 #if zig_has_builtin(add_overflow)
     zig_u16 full_res;
@@ -417,6 +453,12 @@ static inline zig_bool zig_addo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_
 #endif
 }
 
+/* Vector form of zig_addo_u16: per-lane add over n lanes; ov[i] receives the
+ * scalar helper's overflow flag for lane i. */
+static inline void zig_vaddo_u16(zig_u8 *ov, zig_u16 *res, int n,
+    const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits)
+{
+    for (int i = 0; i < n; ++i) ov[i] = zig_addo_u16(&res[i], lhs[i], rhs[i], bits);
+}
+
 static inline zig_bool zig_addo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
 #if zig_has_builtin(add_overflow)
     zig_i16 full_res;
@@ -428,6 +470,12 @@ static inline zig_bool zig_addo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_
 #endif
 }
 
+/* Vector form of zig_addo_i16: per-lane add over n lanes; ov[i] receives the
+ * scalar helper's overflow flag for lane i. */
+static inline void zig_vaddo_i16(zig_u8 *ov, zig_i16 *res, int n,
+    const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits)
+{
+    for (int i = 0; i < n; ++i) ov[i] = zig_addo_i16(&res[i], lhs[i], rhs[i], bits);
+}
+
 static inline zig_bool zig_subo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
 #if zig_has_builtin(sub_overflow)
     zig_u32 full_res;
@@ -440,6 +488,12 @@ static inline zig_bool zig_subo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_
 #endif
 }
 
+/* Vector form of zig_subo_u32: per-lane subtract over n lanes; ov[i] receives
+ * the scalar helper's overflow flag for lane i. */
+static inline void zig_vsubo_u32(zig_u8 *ov, zig_u32 *res, int n,
+    const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits)
+{
+    for (int i = 0; i < n; ++i) ov[i] = zig_subo_u32(&res[i], lhs[i], rhs[i], bits);
+}
+
 zig_extern zig_i32  __subosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow);
 static inline zig_bool zig_subo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
 #if zig_has_builtin(sub_overflow)
@@ -454,6 +508,12 @@ static inline zig_bool zig_subo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_
     return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits);
 }
 
+/* Vector form of zig_subo_i32: per-lane subtract over n lanes; ov[i] receives
+ * the scalar helper's overflow flag for lane i. */
+static inline void zig_vsubo_i32(zig_u8 *ov, zig_i32 *res, int n,
+    const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits)
+{
+    for (int i = 0; i < n; ++i) ov[i] = zig_subo_i32(&res[i], lhs[i], rhs[i], bits);
+}
+
 static inline zig_bool zig_subo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
 #if zig_has_builtin(sub_overflow)
     zig_u64 full_res;
@@ -466,6 +526,12 @@ static inline zig_bool zig_subo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_
 #endif
 }
 
+/* Vector form of zig_subo_u64: per-lane subtract over n lanes; ov[i] receives
+ * the scalar helper's overflow flag for lane i. */
+static inline void zig_vsubo_u64(zig_u8 *ov, zig_u64 *res, int n,
+    const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits)
+{
+    for (int i = 0; i < n; ++i) ov[i] = zig_subo_u64(&res[i], lhs[i], rhs[i], bits);
+}
+
 zig_extern zig_i64  __subodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow);
 static inline zig_bool zig_subo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
 #if zig_has_builtin(sub_overflow)
@@ -480,6 +546,12 @@ static inline zig_bool zig_subo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_
     return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits);
 }
 
+/* Vector form of zig_subo_i64: per-lane subtract over n lanes; ov[i] receives
+ * the scalar helper's overflow flag for lane i. */
+static inline void zig_vsubo_i64(zig_u8 *ov, zig_i64 *res, int n,
+    const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits)
+{
+    for (int i = 0; i < n; ++i) ov[i] = zig_subo_i64(&res[i], lhs[i], rhs[i], bits);
+}
+
 static inline zig_bool zig_subo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
 #if zig_has_builtin(sub_overflow)
     zig_u8 full_res;
@@ -491,6 +563,12 @@ static inline zig_bool zig_subo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 b
 #endif
 }
 
+/* Vector form of zig_subo_u8: per-lane subtract over n lanes; ov[i] receives
+ * the scalar helper's overflow flag for lane i. */
+static inline void zig_vsubo_u8(zig_u8 *ov, zig_u8 *res, int n,
+    const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits)
+{
+    for (int i = 0; i < n; ++i) ov[i] = zig_subo_u8(&res[i], lhs[i], rhs[i], bits);
+}
+
 static inline zig_bool zig_subo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
 #if zig_has_builtin(sub_overflow)
     zig_i8 full_res;
@@ -502,6 +580,13 @@ static inline zig_bool zig_subo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 b
 #endif
 }
 
+/* Vector form of zig_subo_i8: per-lane subtract over n lanes; ov[i] receives
+ * the scalar helper's overflow flag for lane i. */
+static inline void zig_vsubo_i8(zig_u8 *ov, zig_i8 *res, int n,
+    const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits)
+{
+    for (int i = 0; i < n; ++i) ov[i] = zig_subo_i8(&res[i], lhs[i], rhs[i], bits);
+}
+
+
 static inline zig_bool zig_subo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
 #if zig_has_builtin(sub_overflow)
     zig_u16 full_res;
@@ -513,6 +598,13 @@ static inline zig_bool zig_subo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_
 #endif
 }
 
+/* Vector form of zig_subo_u16: per-lane subtract over n lanes; ov[i] receives
+ * the scalar helper's overflow flag for lane i. */
+static inline void zig_vsubo_u16(zig_u8 *ov, zig_u16 *res, int n,
+    const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits)
+{
+    for (int i = 0; i < n; ++i) ov[i] = zig_subo_u16(&res[i], lhs[i], rhs[i], bits);
+}
+
+
 static inline zig_bool zig_subo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
 #if zig_has_builtin(sub_overflow)
     zig_i16 full_res;
@@ -524,6 +616,12 @@ static inline zig_bool zig_subo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_
 #endif
 }
 
+/* Vector form of zig_subo_i16: per-lane subtract over n lanes; ov[i] receives
+ * the scalar helper's overflow flag for lane i. */
+static inline void zig_vsubo_i16(zig_u8 *ov, zig_i16 *res, int n,
+    const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits)
+{
+    for (int i = 0; i < n; ++i) ov[i] = zig_subo_i16(&res[i], lhs[i], rhs[i], bits);
+}
+
 static inline zig_bool zig_mulo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
 #if zig_has_builtin(mul_overflow)
     zig_u32 full_res;
@@ -536,6 +634,12 @@ static inline zig_bool zig_mulo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_
 #endif
 }
 
+/* Vector form of zig_mulo_u32: per-lane multiply over n lanes; ov[i] receives
+ * the scalar helper's overflow flag for lane i. */
+static inline void zig_vmulo_u32(zig_u8 *ov, zig_u32 *res, int n,
+    const zig_u32 *lhs, const zig_u32 *rhs, zig_u8 bits)
+{
+    for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u32(&res[i], lhs[i], rhs[i], bits);
+}
+
 zig_extern zig_i32  __mulosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow);
 static inline zig_bool zig_mulo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
 #if zig_has_builtin(mul_overflow)
@@ -550,6 +654,12 @@ static inline zig_bool zig_mulo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_
     return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits);
 }
 
+/* Vector form of zig_mulo_i32: per-lane multiply over n lanes; ov[i] receives
+ * the scalar helper's overflow flag for lane i. */
+static inline void zig_vmulo_i32(zig_u8 *ov, zig_i32 *res, int n,
+    const zig_i32 *lhs, const zig_i32 *rhs, zig_u8 bits)
+{
+    for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i32(&res[i], lhs[i], rhs[i], bits);
+}
+
 static inline zig_bool zig_mulo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
 #if zig_has_builtin(mul_overflow)
     zig_u64 full_res;
@@ -562,6 +672,12 @@ static inline zig_bool zig_mulo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_
 #endif
 }
 
+/* Vector form of zig_mulo_u64: per-lane multiply over n lanes; ov[i] receives
+ * the scalar helper's overflow flag for lane i. */
+static inline void zig_vmulo_u64(zig_u8 *ov, zig_u64 *res, int n,
+    const zig_u64 *lhs, const zig_u64 *rhs, zig_u8 bits)
+{
+    for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u64(&res[i], lhs[i], rhs[i], bits);
+}
+
 zig_extern zig_i64  __mulodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow);
 static inline zig_bool zig_mulo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
 #if zig_has_builtin(mul_overflow)
@@ -576,6 +692,12 @@ static inline zig_bool zig_mulo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_
     return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits);
 }
 
+/* Vector form of zig_mulo_i64: per-lane multiply over n lanes; ov[i] receives
+ * the scalar helper's overflow flag for lane i. */
+static inline void zig_vmulo_i64(zig_u8 *ov, zig_i64 *res, int n,
+    const zig_i64 *lhs, const zig_i64 *rhs, zig_u8 bits)
+{
+    for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i64(&res[i], lhs[i], rhs[i], bits);
+}
+
 static inline zig_bool zig_mulo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
 #if zig_has_builtin(mul_overflow)
     zig_u8 full_res;
@@ -587,6 +709,12 @@ static inline zig_bool zig_mulo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 b
 #endif
 }
 
+/* Vector form of zig_mulo_u8: per-lane multiply over n lanes; ov[i] receives
+ * the scalar helper's overflow flag for lane i. */
+static inline void zig_vmulo_u8(zig_u8 *ov, zig_u8 *res, int n,
+    const zig_u8 *lhs, const zig_u8 *rhs, zig_u8 bits)
+{
+    for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u8(&res[i], lhs[i], rhs[i], bits);
+}
+
 static inline zig_bool zig_mulo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
 #if zig_has_builtin(mul_overflow)
     zig_i8 full_res;
@@ -598,6 +726,12 @@ static inline zig_bool zig_mulo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 b
 #endif
 }
 
+/* Vector form of zig_mulo_i8: per-lane multiply over n lanes; ov[i] receives
+ * the scalar helper's overflow flag for lane i. */
+static inline void zig_vmulo_i8(zig_u8 *ov, zig_i8 *res, int n,
+    const zig_i8 *lhs, const zig_i8 *rhs, zig_u8 bits)
+{
+    for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i8(&res[i], lhs[i], rhs[i], bits);
+}
+
 static inline zig_bool zig_mulo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
 #if zig_has_builtin(mul_overflow)
     zig_u16 full_res;
@@ -609,6 +743,12 @@ static inline zig_bool zig_mulo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_
 #endif
 }
 
+/* Vector form of zig_mulo_u16: per-lane multiply over n lanes; ov[i] receives
+ * the scalar helper's overflow flag for lane i. */
+static inline void zig_vmulo_u16(zig_u8 *ov, zig_u16 *res, int n,
+    const zig_u16 *lhs, const zig_u16 *rhs, zig_u8 bits)
+{
+    for (int i = 0; i < n; ++i) ov[i] = zig_mulo_u16(&res[i], lhs[i], rhs[i], bits);
+}
+
 static inline zig_bool zig_mulo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
 #if zig_has_builtin(mul_overflow)
     zig_i16 full_res;
@@ -620,6 +760,12 @@ static inline zig_bool zig_mulo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_
 #endif
 }
 
+/* Vector form of zig_mulo_i16: per-lane multiply over n lanes; ov[i] receives
+ * the scalar helper's overflow flag for lane i. */
+static inline void zig_vmulo_i16(zig_u8 *ov, zig_i16 *res, int n,
+    const zig_i16 *lhs, const zig_i16 *rhs, zig_u8 bits)
+{
+    for (int i = 0; i < n; ++i) ov[i] = zig_mulo_i16(&res[i], lhs[i], rhs[i], bits);
+}
+
 #define zig_int_builtins(w) \
     static inline zig_u##w zig_shlw_u##w(zig_u##w lhs, zig_u8 rhs, zig_u8 bits) { \
         return zig_wrap_u##w(zig_shl_u##w(lhs, rhs), bits); \
@@ -846,10 +992,8 @@ static inline zig_u8 zig_bit_reverse_u8(zig_u8 val, zig_u8 bits) {
     full_res = __builtin_bitreverse8(val);
 #else
     static zig_u8 const lut[0x10] = {
-        0b0000, 0b1000, 0b0100, 0b1100,
-        0b0010, 0b1010, 0b0110, 0b1110,
-        0b0001, 0b1001, 0b0101, 0b1101,
-        0b0011, 0b1011, 0b0111, 0b1111,
+        0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe,
+        0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf
     };
     full_res = lut[val >> 0 & 0xF] << 4 | lut[val >> 4 & 0xF] << 0;
 #endif
src/codegen/c.zig
@@ -18,6 +18,12 @@ const Air = @import("../Air.zig");
 const Liveness = @import("../Liveness.zig");
 const CType = @import("../type.zig").CType;
 
+const target_util = @import("../target.zig");
+const libcFloatPrefix = target_util.libcFloatPrefix;
+const libcFloatSuffix = target_util.libcFloatSuffix;
+const compilerRtFloatAbbrev = target_util.compilerRtFloatAbbrev;
+const compilerRtIntAbbrev = target_util.compilerRtIntAbbrev;
+
 const Mutability = enum { Const, ConstArgument, Mut };
 const BigIntLimb = std.math.big.Limb;
 const BigInt = std.math.big.int;
@@ -733,7 +739,7 @@ pub const DeclGen = struct {
                         try dg.fmtIntLiteral(ty.errorUnionSet(), val),
                     });
                 },
-                .Array => {
+                .Array, .Vector => {
                     if (location != .Initializer) {
                         try writer.writeByte('(');
                         try dg.renderTypecast(writer, ty);
@@ -770,10 +776,10 @@ pub const DeclGen = struct {
                 .BoundFn,
                 .Opaque,
                 => unreachable,
+
                 .Fn,
                 .Frame,
                 .AnyFrame,
-                .Vector,
                 => |tag| return dg.fail("TODO: C backend: implement value of type {s}", .{
                     @tagName(tag),
                 }),
@@ -922,7 +928,7 @@ pub const DeclGen = struct {
                 => try dg.renderParentPtr(writer, val, ty),
                 else => unreachable,
             },
-            .Array => {
+            .Array, .Vector => {
                 if (location == .FunctionArgument) {
                     try writer.writeByte('(');
                     try dg.renderTypecast(writer, ty);
@@ -1200,7 +1206,6 @@ pub const DeclGen = struct {
 
             .Frame,
             .AnyFrame,
-            .Vector,
             => |tag| return dg.fail("TODO: C backend: implement value of type {s}", .{
                 @tagName(tag),
             }),
@@ -1746,7 +1751,7 @@ pub const DeclGen = struct {
                 if (t.isVolatilePtr()) try w.writeAll(" volatile");
                 return w.writeAll(" *");
             },
-            .Array => {
+            .Array, .Vector => {
                 var array_pl = Type.Payload.Array{ .base = .{ .tag = .array }, .data = .{
                     .len = t.arrayLenIncludingSentinel(),
                     .elem_type = t.childType(),
@@ -1859,7 +1864,6 @@ pub const DeclGen = struct {
 
             .Frame,
             .AnyFrame,
-            .Vector,
             => |tag| return dg.fail("TODO: C backend: implement value of type {s}", .{
                 @tagName(tag),
             }),
@@ -3180,25 +3184,43 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info:
     const rhs = try f.resolveInst(bin_op.rhs);
 
     const inst_ty = f.air.typeOfIndex(inst);
-    const scalar_ty = f.air.typeOf(bin_op.lhs).scalarType();
+    const vector_ty = f.air.typeOf(bin_op.lhs);
+    const scalar_ty = vector_ty.scalarType();
     const w = f.object.writer();
 
     const local = try f.allocLocal(inst_ty, .Mut);
     try w.writeAll(";\n");
 
-    try f.writeCValueMember(w, local, .{ .field = 1 });
-    try w.writeAll(" = zig_");
-    try w.writeAll(operation);
-    try w.writeAll("o_");
-    try f.object.dg.renderTypeForBuiltinFnName(w, scalar_ty);
-    try w.writeAll("(&");
-    try f.writeCValueMember(w, local, .{ .field = 0 });
-    try w.writeAll(", ");
+    switch (vector_ty.zigTypeTag()) {
+        .Vector => {
+            try w.writeAll("zig_v");
+            try w.writeAll(operation);
+            try w.writeAll("o_");
+            try f.object.dg.renderTypeForBuiltinFnName(w, scalar_ty);
+            try w.writeAll("(");
+            try f.writeCValueMember(w, local, .{ .field = 1 });
+            try w.writeAll(", ");
+            try f.writeCValueMember(w, local, .{ .field = 0 });
+            try w.print(", {d}, ", .{vector_ty.vectorLen()});
+        },
+        else => {
+            try f.writeCValueMember(w, local, .{ .field = 1 });
+            try w.writeAll(" = zig_");
+            try w.writeAll(operation);
+            try w.writeAll("o_");
+            try f.object.dg.renderTypeForBuiltinFnName(w, scalar_ty);
+            try w.writeAll("(&");
+            try f.writeCValueMember(w, local, .{ .field = 0 });
+            try w.writeAll(", ");
+        },
+    }
+
     try f.writeCValue(w, lhs, .FunctionArgument);
     try w.writeAll(", ");
     try f.writeCValue(w, rhs, .FunctionArgument);
     try f.object.dg.renderBuiltinInfo(w, scalar_ty, info);
     try w.writeAll(");\n");
+
     return local;
 }
 
@@ -5206,16 +5228,153 @@ fn airShuffle(f: *Function, inst: Air.Inst.Index) !CValue {
 fn airReduce(f: *Function, inst: Air.Inst.Index) !CValue {
     if (f.liveness.isUnused(inst)) return CValue.none;
 
-    const inst_ty = f.air.typeOfIndex(inst);
+    const target = f.object.dg.module.getTarget();
+    const scalar_ty = f.air.typeOfIndex(inst);
     const reduce = f.air.instructions.items(.data)[inst].reduce;
     const operand = try f.resolveInst(reduce.operand);
+    const operand_ty = f.air.typeOf(reduce.operand);
+    const vector_len = operand_ty.vectorLen();
     const writer = f.object.writer();
-    const local = try f.allocLocal(inst_ty, .Const);
+
+    const Op = union(enum) {
+        call_fn: []const u8,
+        infix: []const u8,
+        ternary: []const u8,
+    };
+    var fn_name_buf: [64]u8 = undefined;
+    const op: Op = switch (reduce.operation) {
+        .And => .{ .infix = " &= " },
+        .Or => .{ .infix = " |= " },
+        .Xor => .{ .infix = " ^= " },
+        .Min => switch (scalar_ty.zigTypeTag()) {
+            .Int => Op{ .ternary = " < " },
+            .Float => op: {
+                const float_bits = scalar_ty.floatBits(target);
+                break :op Op{
+                    .call_fn = std.fmt.bufPrintZ(&fn_name_buf, "{s}fmin{s}", .{
+                        libcFloatPrefix(float_bits), libcFloatSuffix(float_bits),
+                    }) catch unreachable,
+                };
+            },
+            else => unreachable,
+        },
+        .Max => switch (scalar_ty.zigTypeTag()) {
+            .Int => Op{ .ternary = " > " },
+            .Float => op: {
+                const float_bits = scalar_ty.floatBits(target);
+                break :op Op{
+                    .call_fn = std.fmt.bufPrintZ(&fn_name_buf, "{s}fmax{s}", .{
+                        libcFloatPrefix(float_bits), libcFloatSuffix(float_bits),
+                    }) catch unreachable,
+                };
+            },
+            else => unreachable,
+        },
+        .Add => switch (scalar_ty.zigTypeTag()) {
+            .Int => Op{ .infix = " += " },
+            .Float => op: {
+                const float_bits = scalar_ty.floatBits(target);
+                break :op Op{
+                    .call_fn = std.fmt.bufPrintZ(&fn_name_buf, "__add{s}f3", .{
+                        compilerRtFloatAbbrev(float_bits),
+                    }) catch unreachable,
+                };
+            },
+            else => unreachable,
+        },
+        .Mul => switch (scalar_ty.zigTypeTag()) {
+            .Int => Op{ .infix = " *= " },
+            .Float => op: {
+                const float_bits = scalar_ty.floatBits(target);
+                break :op Op{
+                    .call_fn = std.fmt.bufPrintZ(&fn_name_buf, "__mul{s}f3", .{
+                        compilerRtFloatAbbrev(float_bits),
+                    }) catch unreachable,
+                };
+            },
+            else => unreachable,
+        },
+    };
+
+    // Reduce a vector by repeatedly applying a function to produce an
+    // accumulated result.
+    //
+    // Equivalent to:
+    //   reduce: {
+    //     var i: usize = 0;
+    //     var accum: T = init;
+    //     while (i < vec.len) : (i += 1) {
+    //       accum = func(accum, vec[i]);
+    //     }
+    //     break :reduce accum;
+    //   }
+    const it = try f.allocLocal(Type.usize, .Mut);
+    try writer.writeAll(" = 0;\n");
+
+    const accum = try f.allocLocal(scalar_ty, .Mut);
     try writer.writeAll(" = ");
 
-    _ = operand;
-    _ = local;
-    return f.fail("TODO: C backend: implement airReduce", .{});
+    const init_val = switch (reduce.operation) {
+        .And, .Or, .Xor, .Add => "0",
+        .Min => switch (scalar_ty.zigTypeTag()) {
+            .Int => "TODO_intmax",
+            .Float => "TODO_nan",
+            else => unreachable,
+        },
+        .Max => switch (scalar_ty.zigTypeTag()) {
+            .Int => "TODO_intmin",
+            .Float => "TODO_nan",
+            else => unreachable,
+        },
+        .Mul => "1",
+    };
+    try writer.writeAll(init_val);
+    try writer.writeAll(";");
+    try f.object.indent_writer.insertNewline();
+    try writer.writeAll("for(;");
+    try f.writeCValue(writer, it, .Other);
+    try writer.print("<{d};++", .{vector_len});
+    try f.writeCValue(writer, it, .Other);
+    try writer.writeAll(") ");
+    try f.writeCValue(writer, accum, .Other);
+
+    switch (op) {
+        .call_fn => |fn_name| {
+            try writer.print(" = {s}(", .{fn_name});
+            try f.writeCValue(writer, accum, .FunctionArgument);
+            try writer.writeAll(", ");
+            try f.writeCValue(writer, operand, .Other);
+            try writer.writeAll("[");
+            try f.writeCValue(writer, it, .Other);
+            try writer.writeAll("])");
+        },
+        .infix => |ass| {
+            try writer.writeAll(ass);
+            try f.writeCValue(writer, operand, .Other);
+            try writer.writeAll("[");
+            try f.writeCValue(writer, it, .Other);
+            try writer.writeAll("]");
+        },
+        .ternary => |cmp| {
+            try writer.writeAll(" = ");
+            try f.writeCValue(writer, accum, .Other);
+            try writer.writeAll(cmp);
+            try f.writeCValue(writer, operand, .Other);
+            try writer.writeAll("[");
+            try f.writeCValue(writer, it, .Other);
+            try writer.writeAll("] ? ");
+            try f.writeCValue(writer, accum, .Other);
+            try writer.writeAll(" : ");
+            try f.writeCValue(writer, operand, .Other);
+            try writer.writeAll("[");
+            try f.writeCValue(writer, it, .Other);
+            try writer.writeAll("]");
+        },
+    }
+
+    try writer.writeAll(";\n");
+
+    return accum;
 }
 
 fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
@@ -5234,7 +5393,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
     const local = try f.allocLocal(inst_ty, mutability);
     try writer.writeAll(" = ");
     switch (inst_ty.zigTypeTag()) {
-        .Array => {
+        .Array, .Vector => {
             const elem_ty = inst_ty.childType();
             try writer.writeByte('{');
             var empty = true;
@@ -5354,7 +5513,6 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
                 try writer.writeAll(";\n");
             },
         },
-        .Vector => return f.fail("TODO: C backend: implement airAggregateInit for vectors", .{}),
         else => unreachable,
     }
 
@@ -5868,7 +6026,7 @@ fn lowerFnRetTy(ret_ty: Type, buffer: *LowerFnRetTyBuffer, target: std.Target) T
 
 fn lowersToArray(ty: Type, target: std.Target) bool {
     return switch (ty.zigTypeTag()) {
-        .Array => return true,
+        .Array, .Vector => return true,
         else => return ty.isAbiInt() and toCIntBits(@intCast(u32, ty.bitSize(target))) == null,
     };
 }
@@ -5877,7 +6035,7 @@ fn loweredArrayInfo(ty: Type, target: std.Target) ?Type.ArrayInfo {
     if (!lowersToArray(ty, target)) return null;
 
     switch (ty.zigTypeTag()) {
-        .Array => return ty.arrayInfo(),
+        .Array, .Vector => return ty.arrayInfo(),
         else => {
             const abi_size = ty.abiSize(target);
             const abi_align = ty.abiAlignment(target);
src/codegen/llvm.zig
@@ -16,7 +16,6 @@ const Package = @import("../Package.zig");
 const TypedValue = @import("../TypedValue.zig");
 const Air = @import("../Air.zig");
 const Liveness = @import("../Liveness.zig");
-const target_util = @import("../target.zig");
 const Value = @import("../value.zig").Value;
 const Type = @import("../type.zig").Type;
 const LazySrcLoc = Module.LazySrcLoc;
@@ -27,6 +26,12 @@ const aarch64_c_abi = @import("../arch/aarch64/abi.zig");
 const arm_c_abi = @import("../arch/arm/abi.zig");
 const riscv_c_abi = @import("../arch/riscv64/abi.zig");
 
+const target_util = @import("../target.zig");
+const libcFloatPrefix = target_util.libcFloatPrefix;
+const libcFloatSuffix = target_util.libcFloatSuffix;
+const compilerRtFloatAbbrev = target_util.compilerRtFloatAbbrev;
+const compilerRtIntAbbrev = target_util.compilerRtIntAbbrev;
+
 const Error = error{ OutOfMemory, CodegenFail };
 
 pub fn targetTriple(allocator: Allocator, target: std.Target) ![:0]u8 {
@@ -7328,46 +7333,6 @@ pub const FuncGen = struct {
         };
     }
 
-    fn libcFloatPrefix(float_bits: u16) []const u8 {
-        return switch (float_bits) {
-            16, 80 => "__",
-            32, 64, 128 => "",
-            else => unreachable,
-        };
-    }
-
-    fn libcFloatSuffix(float_bits: u16) []const u8 {
-        return switch (float_bits) {
-            16 => "h", // Non-standard
-            32 => "f",
-            64 => "",
-            80 => "x", // Non-standard
-            128 => "q", // Non-standard (mimics convention in GCC libquadmath)
-            else => unreachable,
-        };
-    }
-
-    fn compilerRtFloatAbbrev(float_bits: u16) []const u8 {
-        return switch (float_bits) {
-            16 => "h",
-            32 => "s",
-            64 => "d",
-            80 => "x",
-            128 => "t",
-            else => unreachable,
-        };
-    }
-
-    fn compilerRtIntAbbrev(bits: u16) []const u8 {
-        return switch (bits) {
-            16 => "h",
-            32 => "s",
-            64 => "d",
-            128 => "t",
-            else => "o", // Non-standard
-        };
-    }
-
     /// Creates a floating point comparison by lowering to the appropriate
     /// hardware instruction or softfloat routine for the target
     fn buildFloatCmp(
@@ -9034,12 +8999,10 @@ pub const FuncGen = struct {
         const target = self.dg.module.getTarget();
 
         const reduce = self.air.instructions.items(.data)[inst].reduce;
-        var operand = try self.resolveInst(reduce.operand);
+        const operand = try self.resolveInst(reduce.operand);
         const operand_ty = self.air.typeOf(reduce.operand);
         const scalar_ty = self.air.typeOfIndex(inst);
 
-        // TODO handle the fast math setting
-
         switch (reduce.operation) {
             .And => return self.builder.buildAndReduce(operand),
             .Or => return self.builder.buildOrReduce(operand),
src/target.zig
@@ -724,3 +724,43 @@ pub fn supportsTailCall(target: std.Target, backend: std.builtin.CompilerBackend
         else => return false,
     }
 }
+
+/// Name prefix for libc float helper functions of the given bit width
+/// ("__" for the nonstandard 16/80-bit variants, empty otherwise).
+pub fn libcFloatPrefix(float_bits: u16) []const u8 {
+    return switch (float_bits) {
+        16, 80 => "__",
+        32, 64, 128 => "",
+        else => unreachable,
+    };
+}
+
+/// Suffix appended to libc float function names for the given bit width
+/// (e.g. "f" for 32-bit, empty for 64-bit).
+pub fn libcFloatSuffix(float_bits: u16) []const u8 {
+    return switch (float_bits) {
+        16 => "h", // Non-standard
+        32 => "f",
+        64 => "",
+        80 => "x", // Non-standard
+        128 => "q", // Non-standard (mimics convention in GCC libquadmath)
+        else => unreachable,
+    };
+}
+
+/// Single-letter abbreviation used in compiler-rt routine names for a float
+/// type of the given bit width (e.g. "s" -> single, "d" -> double).
+pub fn compilerRtFloatAbbrev(float_bits: u16) []const u8 {
+    return switch (float_bits) {
+        16 => "h",
+        32 => "s",
+        64 => "d",
+        80 => "x",
+        128 => "t",
+        else => unreachable,
+    };
+}
+
+/// Single-letter abbreviation used in compiler-rt routine names for an
+/// integer type of the given bit width; "o" is a nonstandard fallback.
+pub fn compilerRtIntAbbrev(bits: u16) []const u8 {
+    return switch (bits) {
+        16 => "h",
+        32 => "s",
+        64 => "d",
+        128 => "t",
+        else => "o", // Non-standard
+    };
+}
test/behavior/vector.zig
@@ -13,7 +13,6 @@ test "implicit cast vector to array - bool" {
     }
 
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -294,7 +293,6 @@ test "vector @splat" {
 
 test "load vector elements via comptime index" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -317,7 +315,6 @@ test "load vector elements via comptime index" {
 
 test "store vector elements via comptime index" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -346,7 +343,6 @@ test "store vector elements via comptime index" {
 
 test "load vector elements via runtime index" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -369,7 +365,6 @@ test "load vector elements via runtime index" {
 
 test "store vector elements via runtime index" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -393,7 +388,6 @@ test "store vector elements via runtime index" {
 
 test "initialize vector which is a struct field" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -975,7 +969,6 @@ test "@addWithOverflow" {
         return error.SkipZigTest;
     }
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -1018,7 +1011,6 @@ test "@subWithOverflow" {
         return error.SkipZigTest;
     }
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -1049,7 +1041,6 @@ test "@mulWithOverflow" {
         return error.SkipZigTest;
     }
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -1072,7 +1063,6 @@ test "@shlWithOverflow" {
         return error.SkipZigTest;
     }
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -1098,7 +1088,6 @@ test "alignment of vectors" {
 
 test "loading the second vector from a slice of vectors" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -1115,7 +1104,6 @@ test "loading the second vector from a slice of vectors" {
 
 test "array of vectors is copied" {
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO