Commit 0a7be71bc2

Veikka Tuominen <git@vexu.eu>
2021-01-30 13:56:36
stage2 cbe: non-pointer optionals
1 parent cfc19ea
Changed files (6)
lib/std/hash_map.zig
@@ -50,20 +50,20 @@ pub fn getAutoEqlFn(comptime K: type) (fn (K, K) bool) {
 }
 
 pub fn AutoHashMap(comptime K: type, comptime V: type) type {
-    return HashMap(K, V, getAutoHashFn(K), getAutoEqlFn(K), DefaultMaxLoadPercentage);
+    return HashMap(K, V, getAutoHashFn(K), getAutoEqlFn(K), default_max_load_percentage);
 }
 
 pub fn AutoHashMapUnmanaged(comptime K: type, comptime V: type) type {
-    return HashMapUnmanaged(K, V, getAutoHashFn(K), getAutoEqlFn(K), DefaultMaxLoadPercentage);
+    return HashMapUnmanaged(K, V, getAutoHashFn(K), getAutoEqlFn(K), default_max_load_percentage);
 }
 
 /// Builtin hashmap for strings as keys.
 pub fn StringHashMap(comptime V: type) type {
-    return HashMap([]const u8, V, hashString, eqlString, DefaultMaxLoadPercentage);
+    return HashMap([]const u8, V, hashString, eqlString, default_max_load_percentage);
 }
 
 pub fn StringHashMapUnmanaged(comptime V: type) type {
-    return HashMapUnmanaged([]const u8, V, hashString, eqlString, DefaultMaxLoadPercentage);
+    return HashMapUnmanaged([]const u8, V, hashString, eqlString, default_max_load_percentage);
 }
 
 pub fn eqlString(a: []const u8, b: []const u8) bool {
@@ -74,7 +74,10 @@ pub fn hashString(s: []const u8) u64 {
     return std.hash.Wyhash.hash(0, s);
 }
 
-pub const DefaultMaxLoadPercentage = 80;
+/// Deprecated: use `default_max_load_percentage`.
+pub const DefaultMaxLoadPercentage = default_max_load_percentage;
+
+pub const default_max_load_percentage = 80;
 
 /// General purpose hash table.
 /// No order is guaranteed and any modification invalidates live iterators.
@@ -89,13 +92,13 @@ pub fn HashMap(
     comptime V: type,
     comptime hashFn: fn (key: K) u64,
     comptime eqlFn: fn (a: K, b: K) bool,
-    comptime MaxLoadPercentage: u64,
+    comptime max_load_percentage: u64,
 ) type {
     return struct {
         unmanaged: Unmanaged,
         allocator: *Allocator,
 
-        pub const Unmanaged = HashMapUnmanaged(K, V, hashFn, eqlFn, MaxLoadPercentage);
+        pub const Unmanaged = HashMapUnmanaged(K, V, hashFn, eqlFn, max_load_percentage);
         pub const Entry = Unmanaged.Entry;
         pub const Hash = Unmanaged.Hash;
         pub const Iterator = Unmanaged.Iterator;
@@ -251,9 +254,9 @@ pub fn HashMapUnmanaged(
     comptime V: type,
     hashFn: fn (key: K) u64,
     eqlFn: fn (a: K, b: K) bool,
-    comptime MaxLoadPercentage: u64,
+    comptime max_load_percentage: u64,
 ) type {
-    comptime assert(MaxLoadPercentage > 0 and MaxLoadPercentage < 100);
+    comptime assert(max_load_percentage > 0 and max_load_percentage < 100);
 
     return struct {
         const Self = @This();
@@ -274,12 +277,12 @@ pub fn HashMapUnmanaged(
         // Having a countdown to grow reduces the number of instructions to
         // execute when determining if the hashmap has enough capacity already.
         /// Number of available slots before a grow is needed to satisfy the
-        /// `MaxLoadPercentage`.
+        /// `max_load_percentage`.
         available: Size = 0,
 
         // This is purely empirical and not a /very smart magic constant™/.
         /// Capacity of the first grow when bootstrapping the hashmap.
-        const MinimalCapacity = 8;
+        const minimal_capacity = 8;
 
         // This hashmap is specially designed for sizes that fit in a u32.
         const Size = u32;
@@ -382,7 +385,7 @@ pub fn HashMapUnmanaged(
             found_existing: bool,
         };
 
-        pub const Managed = HashMap(K, V, hashFn, eqlFn, MaxLoadPercentage);
+        pub const Managed = HashMap(K, V, hashFn, eqlFn, max_load_percentage);
 
         pub fn promote(self: Self, allocator: *Allocator) Managed {
             return .{
@@ -392,7 +395,7 @@ pub fn HashMapUnmanaged(
         }
 
         fn isUnderMaxLoadPercentage(size: Size, cap: Size) bool {
-            return size * 100 < MaxLoadPercentage * cap;
+            return size * 100 < max_load_percentage * cap;
         }
 
         pub fn init(allocator: *Allocator) Self {
@@ -425,7 +428,7 @@ pub fn HashMapUnmanaged(
         }
 
         fn capacityForSize(size: Size) Size {
-            var new_cap = @truncate(u32, (@as(u64, size) * 100) / MaxLoadPercentage + 1);
+            var new_cap = @truncate(u32, (@as(u64, size) * 100) / max_load_percentage + 1);
             new_cap = math.ceilPowerOfTwo(u32, new_cap) catch unreachable;
             return new_cap;
         }
@@ -439,7 +442,7 @@ pub fn HashMapUnmanaged(
             if (self.metadata) |_| {
                 self.initMetadatas();
                 self.size = 0;
-                self.available = @truncate(u32, (self.capacity() * MaxLoadPercentage) / 100);
+                self.available = @truncate(u32, (self.capacity() * max_load_percentage) / 100);
             }
         }
 
@@ -712,9 +715,9 @@ pub fn HashMapUnmanaged(
         }
 
         // This counts the number of occupied slots, used + tombstones, which is
-        // what has to stay under the MaxLoadPercentage of capacity.
+        // what has to stay under the max_load_percentage of capacity.
         fn load(self: *const Self) Size {
-            const max_load = (self.capacity() * MaxLoadPercentage) / 100;
+            const max_load = (self.capacity() * max_load_percentage) / 100;
             assert(max_load >= self.available);
             return @truncate(Size, max_load - self.available);
         }
@@ -733,7 +736,7 @@ pub fn HashMapUnmanaged(
             const new_cap = capacityForSize(self.size);
             try other.allocate(allocator, new_cap);
             other.initMetadatas();
-            other.available = @truncate(u32, (new_cap * MaxLoadPercentage) / 100);
+            other.available = @truncate(u32, (new_cap * max_load_percentage) / 100);
 
             var i: Size = 0;
             var metadata = self.metadata.?;
@@ -751,7 +754,7 @@ pub fn HashMapUnmanaged(
         }
 
         fn grow(self: *Self, allocator: *Allocator, new_capacity: Size) !void {
-            const new_cap = std.math.max(new_capacity, MinimalCapacity);
+            const new_cap = std.math.max(new_capacity, minimal_capacity);
             assert(new_cap > self.capacity());
             assert(std.math.isPowerOfTwo(new_cap));
 
@@ -759,7 +762,7 @@ pub fn HashMapUnmanaged(
             defer map.deinit(allocator);
             try map.allocate(allocator, new_cap);
             map.initMetadatas();
-            map.available = @truncate(u32, (new_cap * MaxLoadPercentage) / 100);
+            map.available = @truncate(u32, (new_cap * max_load_percentage) / 100);
 
             if (self.size != 0) {
                 const old_capacity = self.capacity();
@@ -943,7 +946,7 @@ test "std.hash_map ensureCapacity with existing elements" {
 
     try map.put(0, 0);
     expectEqual(map.count(), 1);
-    expectEqual(map.capacity(), @TypeOf(map).Unmanaged.MinimalCapacity);
+    expectEqual(map.capacity(), @TypeOf(map).Unmanaged.minimal_capacity);
 
     try map.ensureCapacity(65);
     expectEqual(map.count(), 1);
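
Note: the renamed constants feed the same load-factor arithmetic as before. Below is a worked sketch of what capacityForSize and the available-slot computation evaluate to, written in C for consistency with the rest of this commit's examples, assuming the default 80% load factor; the ceil_pow2 helper is illustrative, not library code:

    #include <stdint.h>
    #include <stdio.h>

    /* Smallest power of two >= x, mirroring std.math.ceilPowerOfTwo. */
    static uint32_t ceil_pow2(uint32_t x) {
        uint32_t p = 1;
        while (p < x) p <<= 1;
        return p;
    }

    int main(void) {
        const uint64_t max_load_percentage = 80; /* default_max_load_percentage */
        uint64_t size = 65; /* requested capacity, as in the test above */
        /* capacityForSize: smallest power-of-two capacity keeping load under 80%. */
        uint32_t new_cap = ceil_pow2((uint32_t)(size * 100 / max_load_percentage + 1)); /* 82 -> 128 */
        /* available: slots that may be filled before the next grow is needed. */
        uint32_t available = (uint32_t)((uint64_t)new_cap * max_load_percentage / 100); /* 102 */
        printf("cap=%u available=%u\n", new_cap, available);
        return 0;
    }
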
src/codegen/c.zig
@@ -32,6 +32,34 @@ pub const CValue = union(enum) {
 };
 
 pub const CValueMap = std.AutoHashMap(*Inst, CValue);
+pub const TypedefMap = std.HashMap(Type, struct { name: []const u8, rendered: []u8 }, Type.hash, Type.eql, std.hash_map.default_max_load_percentage);
+
+fn formatTypeAsCIdentifier(
+    data: Type,
+    comptime fmt: []const u8,
+    options: std.fmt.FormatOptions,
+    writer: anytype,
+) !void {
+    var buffer = [1]u8{0} ** 128;
+    // We don't care if it gets cut off; it's still more unique than a number.
+    var buf = std.fmt.bufPrint(&buffer, "{}", .{data}) catch &buffer;
+
+    for (buf) |c, i| {
+        switch (c) {
+            0 => return writer.writeAll(buf[0..i]),
+            'a'...'z', 'A'...'Z', '_', '$' => {},
+            '0'...'9' => if (i == 0) {
+                buf[i] = '_';
+            },
+            else => buf[i] = '_',
+        }
+    }
+    return writer.writeAll(buf);
+}
+
+pub fn typeToCIdentifier(t: Type) std.fmt.Formatter(formatTypeAsCIdentifier) {
+    return .{ .data = t };
+}
 
 /// This data is available when outputting .c code for a Module.
 /// It is not available when generating .h file.
@@ -115,6 +143,7 @@ pub const DeclGen = struct {
     decl: *Decl,
     fwd_decl: std.ArrayList(u8),
     error_msg: ?*Module.ErrorMsg,
+    typedefs: TypedefMap,
 
     fn fail(dg: *DeclGen, src: usize, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
         dg.error_msg = try Module.ErrorMsg.create(dg.module.gpa, .{
@@ -325,22 +354,55 @@ pub const DeclGen = struct {
                 const child_type = t.optionalChild(&opt_buf);
                 if (t.isPtrLikeOptional()) {
                     return dg.renderType(w, child_type);
+                } else if (dg.typedefs.get(t)) |some| {
+                    return w.writeAll(some.name);
                 }
 
-                // TODO this needs to be typedeffed since different structs are different types.
-                try w.writeAll("struct { ");
-                try dg.renderType(w, child_type);
-                try w.writeAll(" payload; bool is_null; }");
+                var buffer = std.ArrayList(u8).init(dg.typedefs.allocator);
+                defer buffer.deinit();
+                const bw = buffer.writer();
+
+                try bw.writeAll("typedef struct { ");
+                try dg.renderType(bw, child_type);
+                try bw.writeAll(" payload; bool is_null; } ");
+                const name_index = buffer.items.len;
+                try bw.print("zig_opt_{s}_t;\n", .{typeToCIdentifier(child_type)});
+
+                const rendered = buffer.toOwnedSlice();
+                errdefer dg.typedefs.allocator.free(rendered);
+                const name = rendered[name_index .. rendered.len - 2];
+
+                try dg.typedefs.ensureCapacity(dg.typedefs.capacity() + 1);
+                try w.writeAll(name);
+                dg.typedefs.putAssumeCapacityNoClobber(t, .{ .name = name, .rendered = rendered });
             },
             .ErrorSet => {
                 comptime std.debug.assert(Type.initTag(.anyerror).abiSize(std.Target.current) == 2);
                 try w.writeAll("uint16_t");
             },
             .ErrorUnion => {
-                // TODO this needs to be typedeffed since different structs are different types.
-                try w.writeAll("struct { ");
-                try dg.renderType(w, t.errorUnionChild());
-                try w.writeAll(" payload; uint16_t error; }");
+                if (dg.typedefs.get(t)) |some| {
+                    return w.writeAll(some.name);
+                }
+                const child_type = t.errorUnionChild();
+
+                var buffer = std.ArrayList(u8).init(dg.typedefs.allocator);
+                defer buffer.deinit();
+                const bw = buffer.writer();
+
+                try bw.writeAll("typedef struct { ");
+                try dg.renderType(bw, t.errorUnionChild());
+                try bw.writeAll(" payload; uint16_t error; } ");
+                const name_index = buffer.items.len;
+                try bw.print("zig_err_union_{s}_t;\n", .{typeToCIdentifier(child_type)});
+
+                const rendered = buffer.toOwnedSlice();
+                errdefer dg.typedefs.allocator.free(rendered);
+                const name = rendered[name_index .. rendered.len - 2];
+
+                try dg.typedefs.ensureCapacity(dg.typedefs.capacity() + 1);
+                try w.writeAll(name);
+                dg.typedefs.putAssumeCapacityNoClobber(t, .{ .name = name, .rendered = rendered });
             },
             .Null, .Undefined => unreachable, // must be const or comptime
             else => |e| return dg.fail(dg.decl.src(), "TODO: C backend: implement type {s}", .{
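
For illustration, this is roughly the C that renderType now emits for a non-pointer optional and an error union. The types `?u8` and `anyerror!u8` here are hypothetical examples; the identifiers follow the zig_opt_{s}_t and zig_err_union_{s}_t schemes above, where typeToCIdentifier replaces any character outside [A-Za-z0-9_$] with '_' and underscores a leading digit:

    #include <stdint.h>
    #include <stdbool.h>

    /* Each typedef is emitted once per distinct type, then cached in dg.typedefs. */
    typedef struct { uint8_t payload; bool is_null; } zig_opt_u8_t;         /* ?u8 */
    typedef struct { uint8_t payload; uint16_t error; } zig_err_union_u8_t; /* anyerror!u8 */

    int main(void) {
        zig_opt_u8_t opt = { .payload = 0, .is_null = true };  /* represents `null` */
        zig_err_union_u8_t eu = { .payload = 42, .error = 0 }; /* hypothetical: no error */
        return (opt.is_null && eu.error == 0) ? 0 : 1;
    }
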
src/link/C.zig
@@ -9,6 +9,7 @@ const codegen = @import("../codegen/c.zig");
 const link = @import("../link.zig");
 const trace = @import("../tracy.zig").trace;
 const C = @This();
+const Type = @import("../type.zig").Type;
 
 pub const base_tag: link.File.Tag = .c;
 pub const zig_h = @embedFile("C/zig.h");
@@ -28,9 +29,11 @@ pub const DeclBlock = struct {
 /// Per-function data.
 pub const FnBlock = struct {
     fwd_decl: std.ArrayListUnmanaged(u8),
+    typedefs: codegen.TypedefMap.Unmanaged,
 
     pub const empty: FnBlock = .{
         .fwd_decl = .{},
+        .typedefs = .{},
     };
 };
 
@@ -74,6 +77,11 @@ pub fn allocateDeclIndexes(self: *C, decl: *Module.Decl) !void {}
 pub fn freeDecl(self: *C, decl: *Module.Decl) void {
     decl.link.c.code.deinit(self.base.allocator);
     decl.fn_link.c.fwd_decl.deinit(self.base.allocator);
+    var it = decl.fn_link.c.typedefs.iterator();
+    while (it.next()) |some| {
+        self.base.allocator.free(some.value.rendered);
+    }
+    decl.fn_link.c.typedefs.deinit(self.base.allocator);
 }
 
 pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
@@ -81,8 +89,10 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
     defer tracy.end();
 
     const fwd_decl = &decl.fn_link.c.fwd_decl;
+    const typedefs = &decl.fn_link.c.typedefs;
     const code = &decl.link.c.code;
     fwd_decl.shrinkRetainingCapacity(0);
+    typedefs.clearRetainingCapacity();
     code.shrinkRetainingCapacity(0);
 
     var object: codegen.Object = .{
@@ -91,6 +101,7 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
             .error_msg = null,
             .decl = decl,
             .fwd_decl = fwd_decl.toManaged(module.gpa),
+            .typedefs = typedefs.promote(module.gpa),
         },
         .gpa = module.gpa,
         .code = code.toManaged(module.gpa),
@@ -98,9 +109,16 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
         .indent_writer = undefined, // set later so we can get a pointer to object.code
     };
     object.indent_writer = .{ .underlying_writer = object.code.writer() };
-    defer object.value_map.deinit();
-    defer object.code.deinit();
-    defer object.dg.fwd_decl.deinit();
+    defer {
+        object.value_map.deinit();
+        object.code.deinit();
+        object.dg.fwd_decl.deinit();
+        var it = object.dg.typedefs.iterator();
+        while (it.next()) |some| {
+            module.gpa.free(some.value.rendered);
+        }
+        object.dg.typedefs.deinit();
+    }
 
     codegen.genDecl(&object) catch |err| switch (err) {
         error.AnalysisFail => {
@@ -111,6 +129,8 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
     };
 
     fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
+    typedefs.* = object.dg.typedefs.unmanaged;
+    object.dg.typedefs.unmanaged = .{};
     code.* = object.code.moveToUnmanaged();
 
     // Free excess allocated memory for this Decl.
@@ -142,7 +162,7 @@ pub fn flushModule(self: *C, comp: *Compilation) !void {
     defer all_buffers.deinit();
 
     // This is at least enough until we get to the function bodies without error handling.
-    try all_buffers.ensureCapacity(module.decl_table.count() + 1);
+    try all_buffers.ensureCapacity(module.decl_table.count() + 2);
 
     var file_size: u64 = zig_h.len;
     all_buffers.appendAssumeCapacity(.{
@@ -150,22 +170,25 @@ pub fn flushModule(self: *C, comp: *Compilation) !void {
         .iov_len = zig_h.len,
     });
 
-    var error_defs_buf = std.ArrayList(u8).init(comp.gpa);
-    defer error_defs_buf.deinit();
+    var err_typedef_buf = std.ArrayList(u8).init(comp.gpa);
+    defer err_typedef_buf.deinit();
+    const err_typedef_writer = err_typedef_buf.writer();
+    const err_typedef_item = all_buffers.addOneAssumeCapacity();
 
-    var it = module.global_error_set.iterator();
-    while (it.next()) |entry| {
-        try error_defs_buf.writer().print("#define zig_error_{s} {d}\n", .{ entry.key, entry.value });
+    render_errors: {
+        if (module.global_error_set.size == 0) break :render_errors;
+        var it = module.global_error_set.iterator();
+        while (it.next()) |entry| {
+            try err_typedef_writer.print("#define zig_error_{s} {d}\n", .{ entry.key, entry.value });
+        }
+        try err_typedef_writer.writeByte('\n');
     }
-    try error_defs_buf.writer().writeByte('\n');
-    all_buffers.appendAssumeCapacity(.{
-        .iov_base = error_defs_buf.items.ptr,
-        .iov_len = error_defs_buf.items.len,
-    });
 
     var fn_count: usize = 0;
+    var typedefs = std.HashMap(Type, []const u8, Type.hash, Type.eql, std.hash_map.default_max_load_percentage).init(comp.gpa);
+    defer typedefs.deinit();
 
-    // Forward decls and non-functions first.
+    // Typedefs, forward decls and non-functions first.
     // TODO: performance investigation: would keeping a list of Decls that we should
     // generate, rather than querying here, be faster?
     for (module.decl_table.items()) |kv| {
@@ -174,6 +197,16 @@ pub fn flushModule(self: *C, comp: *Compilation) !void {
             .most_recent => |tvm| {
                 const buf = buf: {
                     if (tvm.typed_value.val.castTag(.function)) |_| {
+                        var it = decl.fn_link.c.typedefs.iterator();
+                        while (it.next()) |new| {
+                            if (typedefs.get(new.key)) |previous| {
+                                try err_typedef_writer.print("typedef {s} {s};\n", .{ previous, new.value.name });
+                            } else {
+                                try typedefs.ensureCapacity(typedefs.capacity() + 1);
+                                try err_typedef_writer.writeAll(new.value.rendered);
+                                typedefs.putAssumeCapacityNoClobber(new.key, new.value.name);
+                            }
+                        }
                         fn_count += 1;
                         break :buf decl.fn_link.c.fwd_decl.items;
                     } else {
@@ -190,6 +223,12 @@ pub fn flushModule(self: *C, comp: *Compilation) !void {
         }
     }
 
+    err_typedef_item.* = .{
+        .iov_base = err_typedef_buf.items.ptr,
+        .iov_len = err_typedef_buf.items.len,
+    };
+    file_size += err_typedef_buf.items.len;
+
     // Now the function bodies.
     try all_buffers.ensureCapacity(all_buffers.items.len + fn_count);
     for (module.decl_table.items()) |kv| {
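
Putting it together: flushModule assembles the output file from iovecs in the order zig.h prelude, error #defines plus deduplicated typedefs (err_typedef_buf), forward declarations and non-function decls, then function bodies. When a later function references a type already seen, it writes a typedef alias to the previously recorded name instead of repeating the struct definition. A hand-written sketch of a flushed file, with hypothetical names and values:

    /* zig.h prelude omitted */
    #include <stdint.h>
    #include <stdbool.h>

    #define zig_error_OutOfMemory 1 /* from module.global_error_set */

    /* Typedefs gathered from each function's FnBlock, emitted once. */
    typedef struct { int payload; bool is_null; } zig_opt_c_int_t;

    /* Forward declarations. */
    int main(void);

    /* Function bodies. */
    int main(void) {
        zig_opt_c_int_t opt = { .payload = 0, .is_null = false };
        return opt.payload;
    }
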
src/Compilation.zig
@@ -1653,6 +1653,8 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
                     .error_msg = null,
                     .decl = decl,
                     .fwd_decl = fwd_decl.toManaged(module.gpa),
+                    // We don't want to emit optionals and error unions to headers since they have no ABI.
+                    .typedefs = undefined,
                 };
                 defer dg.fwd_decl.deinit();
 
src/test.zig
@@ -868,11 +868,10 @@ pub const TestContext = struct {
                                 std.testing.zig_exe_path,
                                 "run",
                                 "-cflags",
-                                "-std=c89",
+                                "-std=c99",
                                 "-pedantic",
                                 "-Werror",
                                 "-Wno-incompatible-library-redeclaration", // https://github.com/ziglang/zig/issues/875
-                                "-Wno-declaration-after-statement",
                                 "--",
                                 "-lc",
                                 exe_path,
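
The switch from -std=c89 to -std=c99 (and dropping -Wno-declaration-after-statement) reflects that the generated C now leans on C99 features. A minimal, hand-written example of the constructs in question; exactly which of them the backend emits for a given program may vary:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void) {
        puts("start");
        int x = 1;                        /* declaration after a statement: C99, rejected by C89 */
        bool flag = true;                 /* <stdbool.h> bool: C99 */
        struct { int a; } s = { .a = x }; /* designated initializer: C99 */
        return flag ? s.a - 1 : 1;
    }
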
test/stage2/cbe.zig
@@ -258,6 +258,18 @@ pub fn addCases(ctx: *TestContext) !void {
             \\    return count - 5;
             \\}
         , "");
+
+        // Same as above, but with non-pointer optionals.
+        case.addCompareOutput(
+            \\export fn main() c_int {
+            \\    var count: c_int = 0;
+            \\    var opt_ptr: ?c_int = count;
+            \\    while (opt_ptr) |_| : (count += 1) {
+            \\        if (count == 4) opt_ptr = null;
+            \\    }
+            \\    return count - 5;
+            \\}
+        , "");
     }
     ctx.c("empty start function", linux_x64,
         \\export fn _start() noreturn {