Commit 5dfcd09e49

Andrew Kelley <superjoe30@gmail.com>
2018-08-03 23:22:17
self-hosted: watch files and trigger a rebuild
1 parent 7f6e97c
src/ir.cpp
@@ -9614,6 +9614,9 @@ static ConstExprValue *ir_resolve_const(IrAnalyze *ira, IrInstruction *value, Un
         case ConstValSpecialStatic:
             return &value->value;
         case ConstValSpecialRuntime:
+            if (!type_has_bits(value->value.type)) {
+                return &value->value;
+            }
             ir_add_error(ira, value, buf_sprintf("unable to evaluate constant expression"));
             return nullptr;
         case ConstValSpecialUndef:
@@ -16115,8 +16118,14 @@ static TypeTableEntry *ir_analyze_container_init_fields_union(IrAnalyze *ira, Ir
     if (casted_field_value == ira->codegen->invalid_instruction)
         return ira->codegen->builtin_types.entry_invalid;
 
+    type_ensure_zero_bits_known(ira->codegen, casted_field_value->value.type);
+    if (type_is_invalid(casted_field_value->value.type))
+        return ira->codegen->builtin_types.entry_invalid;
+
     bool is_comptime = ir_should_inline(ira->new_irb.exec, instruction->scope);
-    if (is_comptime || casted_field_value->value.special != ConstValSpecialRuntime) {
+    if (is_comptime || casted_field_value->value.special != ConstValSpecialRuntime ||
+        !type_has_bits(casted_field_value->value.type))
+    {
         ConstExprValue *field_val = ir_resolve_const(ira, casted_field_value, UndefOk);
         if (!field_val)
             return ira->codegen->builtin_types.entry_invalid;
src-self-hosted/compilation.zig
@@ -230,6 +230,8 @@ pub const Compilation = struct {
 
     c_int_types: [CInt.list.len]*Type.Int,
 
+    fs_watch: *fs.Watch(*Scope.Root),
+
     const IntTypeTable = std.HashMap(*const Type.Int.Key, *Type.Int, Type.Int.Key.hash, Type.Int.Key.eql);
     const ArrayTypeTable = std.HashMap(*const Type.Array.Key, *Type.Array, Type.Array.Key.hash, Type.Array.Key.eql);
     const PtrTypeTable = std.HashMap(*const Type.Pointer.Key, *Type.Pointer, Type.Pointer.Key.hash, Type.Pointer.Key.eql);
@@ -285,6 +287,7 @@ pub const Compilation = struct {
         LibCMissingDynamicLinker,
         InvalidDarwinVersionString,
         UnsupportedLinkArchitecture,
+        UserResourceLimitReached,
     };
 
     pub const Event = union(enum) {
@@ -331,7 +334,8 @@ pub const Compilation = struct {
         zig_lib_dir: []const u8,
     ) !*Compilation {
         const loop = event_loop_local.loop;
-        const comp = try event_loop_local.loop.allocator.create(Compilation{
+        const comp = try event_loop_local.loop.allocator.createOne(Compilation);
+        comp.* = Compilation{
             .loop = loop,
             .arena_allocator = std.heap.ArenaAllocator.init(loop.allocator),
             .event_loop_local = event_loop_local,
@@ -376,7 +380,7 @@ pub const Compilation = struct {
             .fn_link_set = event.Locked(FnLinkSet).init(loop, FnLinkSet.init()),
             .windows_subsystem_windows = false,
             .windows_subsystem_console = false,
-            .link_libs_list = undefined,
+            .link_libs_list = ArrayList(*LinkLib).init(comp.arena()),
             .libc_link_lib = null,
             .err_color = errmsg.Color.Auto,
             .darwin_frameworks = [][]const u8{},
@@ -417,8 +421,10 @@ pub const Compilation = struct {
             .override_libc = null,
             .destroy_handle = undefined,
             .have_err_ret_tracing = false,
-            .primitive_type_table = undefined,
-        });
+            .primitive_type_table = TypeTable.init(comp.arena()),
+
+            .fs_watch = undefined,
+        };
         errdefer {
             comp.int_type_table.private_data.deinit();
             comp.array_type_table.private_data.deinit();
@@ -431,9 +437,7 @@ pub const Compilation = struct {
         comp.name = try Buffer.init(comp.arena(), name);
         comp.llvm_triple = try target.getTriple(comp.arena());
         comp.llvm_target = try Target.llvmTargetFromTriple(comp.llvm_triple);
-        comp.link_libs_list = ArrayList(*LinkLib).init(comp.arena());
         comp.zig_std_dir = try std.os.path.join(comp.arena(), zig_lib_dir, "std");
-        comp.primitive_type_table = TypeTable.init(comp.arena());
 
         const opt_level = switch (build_mode) {
             builtin.Mode.Debug => llvm.CodeGenLevelNone,
@@ -485,6 +489,9 @@ pub const Compilation = struct {
             comp.root_package = try Package.create(comp.arena(), ".", "");
         }
 
+        comp.fs_watch = try fs.Watch(*Scope.Root).create(loop, 16);
+        errdefer comp.fs_watch.destroy();
+
         try comp.initTypes();
 
         comp.destroy_handle = try async<loop.allocator> comp.internalDeinit();
@@ -686,6 +693,7 @@ pub const Compilation = struct {
             os.deleteTree(self.arena(), tmp_dir) catch {};
         } else |_| {};
 
+        self.fs_watch.destroy();
         self.events.destroy();
 
         llvm.DisposeMessage(self.target_layout_str);
@@ -720,7 +728,9 @@ pub const Compilation = struct {
         var build_result = await (async self.initialCompile() catch unreachable);
 
         while (true) {
-            const link_result = if (build_result) self.maybeLink() else |err| err;
+            const link_result = if (build_result) blk: {
+                break :blk await (async self.maybeLink() catch unreachable);
+            } else |err| err;
             // this makes a handy error return trace and stack trace in debug mode
             if (std.debug.runtime_safety) {
                 link_result catch unreachable;
@@ -745,9 +755,35 @@ pub const Compilation = struct {
                 await (async self.events.put(Event{ .Error = err }) catch unreachable);
             }
 
+            // First, get an item from the watch channel, waiting on the channel.
             var group = event.Group(BuildError!void).init(self.loop);
-            while (self.fs_watch.channel.getOrNull()) |root_scope| {
-                try group.call(rebuildFile, self, root_scope);
+            {
+                const ev = await (async self.fs_watch.channel.get() catch unreachable);
+                const root_scope = switch (ev) {
+                    fs.Watch(*Scope.Root).Event.CloseWrite => |x| x,
+                    fs.Watch(*Scope.Root).Event.Err => |err| {
+                        build_result = err;
+                        continue;
+                    },
+                };
+                group.call(rebuildFile, self, root_scope) catch |err| {
+                    build_result = err;
+                    continue;
+                };
+            }
+            // Next, get all the items from the channel that are buffered up.
+            while (await (async self.fs_watch.channel.getOrNull() catch unreachable)) |ev| {
+                const root_scope = switch (ev) {
+                    fs.Watch(*Scope.Root).Event.CloseWrite => |x| x,
+                    fs.Watch(*Scope.Root).Event.Err => |err| {
+                        build_result = err;
+                        continue;
+                    },
+                };
+                group.call(rebuildFile, self, root_scope) catch |err| {
+                    build_result = err;
+                    continue;
+                };
             }
             build_result = await (async group.wait() catch unreachable);
         }
@@ -757,11 +793,11 @@ pub const Compilation = struct {
         const tree_scope = blk: {
             const source_code = (await (async fs.readFile(
                 self.loop,
-                root_src_real_path,
+                root_scope.realpath,
                 max_src_size,
             ) catch unreachable)) catch |err| {
-                try printError("unable to open '{}': {}", root_src_real_path, err);
-                return err;
+                try self.addCompileErrorCli(root_scope.realpath, "unable to open: {}", @errorName(err));
+                return;
             };
             errdefer self.gpa().free(source_code);
 
@@ -793,9 +829,9 @@ pub const Compilation = struct {
         var decl_group = event.Group(BuildError!void).init(self.loop);
         defer decl_group.deinit();
 
-        try self.rebuildChangedDecls(
+        try await try async self.rebuildChangedDecls(
             &decl_group,
-            locked_table,
+            locked_table.value,
             root_scope.decls,
             &tree_scope.tree.root_node.decls,
             tree_scope,
@@ -809,7 +845,7 @@ pub const Compilation = struct {
         group: *event.Group(BuildError!void),
         locked_table: *Decl.Table,
         decl_scope: *Scope.Decls,
-        ast_decls: &ast.Node.Root.DeclList,
+        ast_decls: *ast.Node.Root.DeclList,
         tree_scope: *Scope.AstTree,
     ) !void {
         var existing_decls = try locked_table.clone();
@@ -824,14 +860,14 @@ pub const Compilation = struct {
 
                     // TODO connect existing comptime decls to updated source files
 
-                    try self.prelink_group.call(addCompTimeBlock, self, &decl_scope.base, comptime_node);
+                    try self.prelink_group.call(addCompTimeBlock, self, tree_scope, &decl_scope.base, comptime_node);
                 },
                 ast.Node.Id.VarDecl => @panic("TODO"),
                 ast.Node.Id.FnProto => {
                     const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", decl);
 
                     const name = if (fn_proto.name_token) |name_token| tree_scope.tree.tokenSlice(name_token) else {
-                        try self.addCompileError(root_scope, Span{
+                        try self.addCompileError(tree_scope, Span{
                             .first = fn_proto.fn_token,
                             .last = fn_proto.fn_token + 1,
                         }, "missing function name");
@@ -856,10 +892,12 @@ pub const Compilation = struct {
                                 .visib = parseVisibToken(tree_scope.tree, fn_proto.visib_token),
                                 .resolution = event.Future(BuildError!void).init(self.loop),
                                 .parent_scope = &decl_scope.base,
+                                .tree_scope = tree_scope,
                             },
                             .value = Decl.Fn.Val{ .Unresolved = {} },
                             .fn_proto = fn_proto,
                         });
+                        tree_scope.base.ref();
                         errdefer self.gpa().destroy(fn_decl);
 
                         try group.call(addTopLevelDecl, self, &fn_decl.base, locked_table);
@@ -883,8 +921,8 @@ pub const Compilation = struct {
             const root_scope = blk: {
                 // TODO async/await os.path.real
                 const root_src_real_path = os.path.real(self.gpa(), root_src_path) catch |err| {
-                    try printError("unable to get real path '{}': {}", root_src_path, err);
-                    return err;
+                    try self.addCompileErrorCli(root_src_path, "unable to open: {}", @errorName(err));
+                    return;
                 };
                 errdefer self.gpa().free(root_src_real_path);
 
@@ -892,7 +930,8 @@ pub const Compilation = struct {
             };
             defer root_scope.base.deref(self);
 
-            try self.rebuildFile(root_scope);
+            assert((try await try async self.fs_watch.addFile(root_scope.realpath, root_scope)) == null);
+            try await try async self.rebuildFile(root_scope);
         }
     }
 
@@ -917,6 +956,7 @@ pub const Compilation = struct {
     /// caller takes ownership of resulting Code
     async fn genAndAnalyzeCode(
         comp: *Compilation,
+        tree_scope: *Scope.AstTree,
         scope: *Scope,
         node: *ast.Node,
         expected_type: ?*Type,
@@ -924,6 +964,7 @@ pub const Compilation = struct {
         const unanalyzed_code = try await (async ir.gen(
             comp,
             node,
+            tree_scope,
             scope,
         ) catch unreachable);
         defer unanalyzed_code.destroy(comp.gpa());
@@ -950,6 +991,7 @@ pub const Compilation = struct {
 
     async fn addCompTimeBlock(
         comp: *Compilation,
+        tree_scope: *Scope.AstTree,
         scope: *Scope,
         comptime_node: *ast.Node.Comptime,
     ) !void {
@@ -958,6 +1000,7 @@ pub const Compilation = struct {
 
         const analyzed_code = (await (async genAndAnalyzeCode(
             comp,
+            tree_scope,
             scope,
             comptime_node.expr,
             &void_type.base,
@@ -975,25 +1018,37 @@ pub const Compilation = struct {
         decl: *Decl,
         locked_table: *Decl.Table,
     ) !void {
-        const tree = decl.findRootScope().tree;
-        const is_export = decl.isExported(tree);
+        const is_export = decl.isExported(decl.tree_scope.tree);
 
         if (is_export) {
             try self.prelink_group.call(verifyUniqueSymbol, self, decl);
             try self.prelink_group.call(resolveDecl, self, decl);
         }
 
-        if (try locked_table.put(decl.name, decl)) |other_decl| {
-            try self.addCompileError(decls.base.findRoot(), decl.getSpan(), "redefinition of '{}'", decl.name);
+        const gop = try locked_table.getOrPut(decl.name);
+        if (gop.found_existing) {
+            try self.addCompileError(decl.tree_scope, decl.getSpan(), "redefinition of '{}'", decl.name);
             // TODO note: other definition here
+        } else {
+            gop.kv.value = decl;
         }
     }
 
-    fn addCompileError(self: *Compilation, root: *Scope.Root, span: Span, comptime fmt: []const u8, args: ...) !void {
+    fn addCompileError(self: *Compilation, tree_scope: *Scope.AstTree, span: Span, comptime fmt: []const u8, args: ...) !void {
+        const text = try std.fmt.allocPrint(self.gpa(), fmt, args);
+        errdefer self.gpa().free(text);
+
+        const msg = try Msg.createFromScope(self, tree_scope, span, text);
+        errdefer msg.destroy();
+
+        try self.prelink_group.call(addCompileErrorAsync, self, msg);
+    }
+
+    fn addCompileErrorCli(self: *Compilation, realpath: []const u8, comptime fmt: []const u8, args: ...) !void {
         const text = try std.fmt.allocPrint(self.gpa(), fmt, args);
         errdefer self.gpa().free(text);
 
-        const msg = try Msg.createFromScope(self, root, span, text);
+        const msg = try Msg.createFromCli(self, realpath, text);
         errdefer msg.destroy();
 
         try self.prelink_group.call(addCompileErrorAsync, self, msg);
@@ -1017,7 +1072,7 @@ pub const Compilation = struct {
 
         if (try exported_symbol_names.value.put(decl.name, decl)) |other_decl| {
             try self.addCompileError(
-                decl.findRootScope(),
+                decl.tree_scope,
                 decl.getSpan(),
                 "exported symbol collision: '{}'",
                 decl.name,
@@ -1141,18 +1196,24 @@ pub const Compilation = struct {
     }
 
     /// Returns a value which has been ref()'d once
-    async fn analyzeConstValue(comp: *Compilation, scope: *Scope, node: *ast.Node, expected_type: *Type) !*Value {
-        const analyzed_code = try await (async comp.genAndAnalyzeCode(scope, node, expected_type) catch unreachable);
+    async fn analyzeConstValue(
+        comp: *Compilation,
+        tree_scope: *Scope.AstTree,
+        scope: *Scope,
+        node: *ast.Node,
+        expected_type: *Type,
+    ) !*Value {
+        const analyzed_code = try await (async comp.genAndAnalyzeCode(tree_scope, scope, node, expected_type) catch unreachable);
         defer analyzed_code.destroy(comp.gpa());
 
         return analyzed_code.getCompTimeResult(comp);
     }
 
-    async fn analyzeTypeExpr(comp: *Compilation, scope: *Scope, node: *ast.Node) !*Type {
+    async fn analyzeTypeExpr(comp: *Compilation, tree_scope: *Scope.AstTree, scope: *Scope, node: *ast.Node) !*Type {
         const meta_type = &Type.MetaType.get(comp).base;
         defer meta_type.base.deref(comp);
 
-        const result_val = try await (async comp.analyzeConstValue(scope, node, meta_type) catch unreachable);
+        const result_val = try await (async comp.analyzeConstValue(tree_scope, scope, node, meta_type) catch unreachable);
         errdefer result_val.base.deref(comp);
 
         return result_val.cast(Type).?;
@@ -1168,13 +1229,6 @@ pub const Compilation = struct {
     }
 };
 
-fn printError(comptime format: []const u8, args: ...) !void {
-    var stderr_file = try std.io.getStdErr();
-    var stderr_file_out_stream = std.io.FileOutStream.init(&stderr_file);
-    const out_stream = &stderr_file_out_stream.stream;
-    try out_stream.print(format, args);
-}
-
 fn parseVisibToken(tree: *ast.Tree, optional_token_index: ?ast.TokenIndex) Visib {
     if (optional_token_index) |token_index| {
         const token = tree.tokens.at(token_index);
@@ -1198,12 +1252,14 @@ async fn generateDecl(comp: *Compilation, decl: *Decl) !void {
 }
 
 async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void {
+    const tree_scope = fn_decl.base.tree_scope;
+
     const body_node = fn_decl.fn_proto.body_node orelse return await (async generateDeclFnProto(comp, fn_decl) catch unreachable);
 
     const fndef_scope = try Scope.FnDef.create(comp, fn_decl.base.parent_scope);
     defer fndef_scope.base.deref(comp);
 
-    const fn_type = try await (async analyzeFnType(comp, fn_decl.base.parent_scope, fn_decl.fn_proto) catch unreachable);
+    const fn_type = try await (async analyzeFnType(comp, tree_scope, fn_decl.base.parent_scope, fn_decl.fn_proto) catch unreachable);
     defer fn_type.base.base.deref(comp);
 
     var symbol_name = try std.Buffer.init(comp.gpa(), fn_decl.base.name);
@@ -1216,18 +1272,17 @@ async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void {
     symbol_name_consumed = true;
 
     // Define local parameter variables
-    const root_scope = fn_decl.base.findRootScope();
     for (fn_type.key.data.Normal.params) |param, i| {
         //AstNode *param_decl_node = get_param_decl_node(fn_table_entry, i);
         const param_decl = @fieldParentPtr(ast.Node.ParamDecl, "base", fn_decl.fn_proto.params.at(i).*);
         const name_token = param_decl.name_token orelse {
-            try comp.addCompileError(root_scope, Span{
+            try comp.addCompileError(tree_scope, Span{
                 .first = param_decl.firstToken(),
                 .last = param_decl.type_node.firstToken(),
             }, "missing parameter name");
             return error.SemanticAnalysisFailed;
         };
-        const param_name = root_scope.tree.tokenSlice(name_token);
+        const param_name = tree_scope.tree.tokenSlice(name_token);
 
         // if (is_noalias && get_codegen_ptr_type(param_type) == nullptr) {
         //     add_node_error(g, param_decl_node, buf_sprintf("noalias on non-pointer parameter"));
@@ -1249,6 +1304,7 @@ async fn generateDeclFn(comp: *Compilation, fn_decl: *Decl.Fn) !void {
     }
 
     const analyzed_code = try await (async comp.genAndAnalyzeCode(
+        tree_scope,
         fn_val.child_scope,
         body_node,
         fn_type.key.data.Normal.return_type,
@@ -1279,12 +1335,17 @@ fn getZigDir(allocator: *mem.Allocator) ![]u8 {
     return os.getAppDataDir(allocator, "zig");
 }
 
-async fn analyzeFnType(comp: *Compilation, scope: *Scope, fn_proto: *ast.Node.FnProto) !*Type.Fn {
+async fn analyzeFnType(
+    comp: *Compilation,
+    tree_scope: *Scope.AstTree,
+    scope: *Scope,
+    fn_proto: *ast.Node.FnProto,
+) !*Type.Fn {
     const return_type_node = switch (fn_proto.return_type) {
         ast.Node.FnProto.ReturnType.Explicit => |n| n,
         ast.Node.FnProto.ReturnType.InferErrorSet => |n| n,
     };
-    const return_type = try await (async comp.analyzeTypeExpr(scope, return_type_node) catch unreachable);
+    const return_type = try await (async comp.analyzeTypeExpr(tree_scope, scope, return_type_node) catch unreachable);
     return_type.base.deref(comp);
 
     var params = ArrayList(Type.Fn.Param).init(comp.gpa());
@@ -1300,7 +1361,7 @@ async fn analyzeFnType(comp: *Compilation, scope: *Scope, fn_proto: *ast.Node.Fn
         var it = fn_proto.params.iterator(0);
         while (it.next()) |param_node_ptr| {
             const param_node = param_node_ptr.*.cast(ast.Node.ParamDecl).?;
-            const param_type = try await (async comp.analyzeTypeExpr(scope, param_node.type_node) catch unreachable);
+            const param_type = try await (async comp.analyzeTypeExpr(tree_scope, scope, param_node.type_node) catch unreachable);
             errdefer param_type.base.deref(comp);
             try params.append(Type.Fn.Param{
                 .typ = param_type,
@@ -1337,7 +1398,12 @@ async fn analyzeFnType(comp: *Compilation, scope: *Scope, fn_proto: *ast.Node.Fn
 }
 
 async fn generateDeclFnProto(comp: *Compilation, fn_decl: *Decl.Fn) !void {
-    const fn_type = try await (async analyzeFnType(comp, fn_decl.base.parent_scope, fn_decl.fn_proto) catch unreachable);
+    const fn_type = try await (async analyzeFnType(
+        comp,
+        fn_decl.base.tree_scope,
+        fn_decl.base.parent_scope,
+        fn_decl.fn_proto,
+    ) catch unreachable);
     defer fn_type.base.base.deref(comp);
 
     var symbol_name = try std.Buffer.init(comp.gpa(), fn_decl.base.name);
src-self-hosted/decl.zig
@@ -16,6 +16,8 @@ pub const Decl = struct {
     visib: Visib,
     resolution: event.Future(Compilation.BuildError!void),
     parent_scope: *Scope,
+
+    // TODO when we destroy the decl, deref the tree scope
     tree_scope: *Scope.AstTree,
 
     pub const Table = std.HashMap([]const u8, *Decl, mem.hash_slice_u8, mem.eql_slice_u8);
src-self-hosted/errmsg.zig
@@ -33,35 +33,48 @@ pub const Span = struct {
 };
 
 pub const Msg = struct {
-    span: Span,
     text: []u8,
+    realpath: []u8,
     data: Data,
 
     const Data = union(enum) {
+        Cli: Cli,
         PathAndTree: PathAndTree,
         ScopeAndComp: ScopeAndComp,
     };
 
     const PathAndTree = struct {
-        realpath: []const u8,
+        span: Span,
         tree: *ast.Tree,
         allocator: *mem.Allocator,
     };
 
     const ScopeAndComp = struct {
+        span: Span,
         tree_scope: *Scope.AstTree,
         compilation: *Compilation,
     };
 
+    const Cli = struct {
+        allocator: *mem.Allocator,
+    };
+
     pub fn destroy(self: *Msg) void {
         switch (self.data) {
+            Data.Cli => |cli| {
+                cli.allocator.free(self.text);
+                cli.allocator.free(self.realpath);
+                cli.allocator.destroy(self);
+            },
             Data.PathAndTree => |path_and_tree| {
                 path_and_tree.allocator.free(self.text);
+                path_and_tree.allocator.free(self.realpath);
                 path_and_tree.allocator.destroy(self);
             },
             Data.ScopeAndComp => |scope_and_comp| {
                 scope_and_comp.tree_scope.base.deref(scope_and_comp.compilation);
                 scope_and_comp.compilation.gpa().free(self.text);
+                scope_and_comp.compilation.gpa().free(self.realpath);
                 scope_and_comp.compilation.gpa().destroy(self);
             },
         }
@@ -69,6 +82,7 @@ pub const Msg = struct {
 
     fn getAllocator(self: *const Msg) *mem.Allocator {
         switch (self.data) {
+            Data.Cli => |cli| return cli.allocator,
             Data.PathAndTree => |path_and_tree| {
                 return path_and_tree.allocator;
             },
@@ -78,19 +92,9 @@ pub const Msg = struct {
         }
     }
 
-    pub fn getRealPath(self: *const Msg) []const u8 {
-        switch (self.data) {
-            Data.PathAndTree => |path_and_tree| {
-                return path_and_tree.realpath;
-            },
-            Data.ScopeAndComp => |scope_and_comp| {
-                return scope_and_comp.tree_scope.root().realpath;
-            },
-        }
-    }
-
     pub fn getTree(self: *const Msg) *ast.Tree {
         switch (self.data) {
+            Data.Cli => unreachable,
             Data.PathAndTree => |path_and_tree| {
                 return path_and_tree.tree;
             },
@@ -100,16 +104,28 @@ pub const Msg = struct {
         }
     }
 
+    pub fn getSpan(self: *const Msg) Span {
+        return switch (self.data) {
+            Data.Cli => unreachable,
+            Data.PathAndTree => |path_and_tree| path_and_tree.span,
+            Data.ScopeAndComp => |scope_and_comp| scope_and_comp.span,
+        };
+    }
+
     /// Takes ownership of text
     /// References tree_scope, and derefs when the msg is freed
     pub fn createFromScope(comp: *Compilation, tree_scope: *Scope.AstTree, span: Span, text: []u8) !*Msg {
+        const realpath = try mem.dupe(comp.gpa(), u8, tree_scope.root().realpath);
+        errdefer comp.gpa().free(realpath);
+
         const msg = try comp.gpa().create(Msg{
             .text = text,
-            .span = span,
+            .realpath = realpath,
             .data = Data{
                 .ScopeAndComp = ScopeAndComp{
                     .tree_scope = tree_scope,
                     .compilation = comp,
+                    .span = span,
                 },
             },
         });
@@ -117,6 +133,22 @@ pub const Msg = struct {
         return msg;
     }
 
+    /// Caller owns returned Msg and must free with `allocator`
+    /// allocator will additionally be used for printing messages later.
+    pub fn createFromCli(comp: *Compilation, realpath: []const u8, text: []u8) !*Msg {
+        const realpath_copy = try mem.dupe(comp.gpa(), u8, realpath);
+        errdefer comp.gpa().free(realpath_copy);
+
+        const msg = try comp.gpa().create(Msg{
+            .text = text,
+            .realpath = realpath_copy,
+            .data = Data{
+                .Cli = Cli{ .allocator = comp.gpa() },
+            },
+        });
+        return msg;
+    }
+
     pub fn createFromParseErrorAndScope(
         comp: *Compilation,
         tree_scope: *Scope.AstTree,
@@ -126,19 +158,23 @@ pub const Msg = struct {
         var text_buf = try std.Buffer.initSize(comp.gpa(), 0);
         defer text_buf.deinit();
 
+        const realpath_copy = try mem.dupe(comp.gpa(), u8, tree_scope.root().realpath);
+        errdefer comp.gpa().free(realpath_copy);
+
         var out_stream = &std.io.BufferOutStream.init(&text_buf).stream;
         try parse_error.render(&tree_scope.tree.tokens, out_stream);
 
         const msg = try comp.gpa().create(Msg{
             .text = undefined,
-            .span = Span{
-                .first = loc_token,
-                .last = loc_token,
-            },
+            .realpath = realpath_copy,
             .data = Data{
                 .ScopeAndComp = ScopeAndComp{
                     .tree_scope = tree_scope,
                     .compilation = comp,
+                    .span = Span{
+                        .first = loc_token,
+                        .last = loc_token,
+                    },
                 },
             },
         });
@@ -161,22 +197,25 @@ pub const Msg = struct {
         var text_buf = try std.Buffer.initSize(allocator, 0);
         defer text_buf.deinit();
 
+        const realpath_copy = try mem.dupe(allocator, u8, realpath);
+        errdefer allocator.free(realpath_copy);
+
         var out_stream = &std.io.BufferOutStream.init(&text_buf).stream;
         try parse_error.render(&tree.tokens, out_stream);
 
         const msg = try allocator.create(Msg{
             .text = undefined,
+            .realpath = realpath_copy,
             .data = Data{
                 .PathAndTree = PathAndTree{
                     .allocator = allocator,
-                    .realpath = realpath,
                     .tree = tree,
+                    .span = Span{
+                        .first = loc_token,
+                        .last = loc_token,
+                    },
                 },
             },
-            .span = Span{
-                .first = loc_token,
-                .last = loc_token,
-            },
         });
         msg.text = text_buf.toOwnedSlice();
         errdefer allocator.destroy(msg);
@@ -185,20 +224,28 @@ pub const Msg = struct {
     }
 
     pub fn printToStream(msg: *const Msg, stream: var, color_on: bool) !void {
+        switch (msg.data) {
+            Data.Cli => {
+                try stream.print("{}:-:-: error: {}\n", msg.realpath, msg.text);
+                return;
+            },
+            else => {},
+        }
+
         const allocator = msg.getAllocator();
-        const realpath = msg.getRealPath();
         const tree = msg.getTree();
 
         const cwd = try os.getCwd(allocator);
         defer allocator.free(cwd);
 
-        const relpath = try os.path.relative(allocator, cwd, realpath);
+        const relpath = try os.path.relative(allocator, cwd, msg.realpath);
         defer allocator.free(relpath);
 
-        const path = if (relpath.len < realpath.len) relpath else realpath;
+        const path = if (relpath.len < msg.realpath.len) relpath else msg.realpath;
+        const span = msg.getSpan();
 
-        const first_token = tree.tokens.at(msg.span.first);
-        const last_token = tree.tokens.at(msg.span.last);
+        const first_token = tree.tokens.at(span.first);
+        const last_token = tree.tokens.at(span.last);
         const start_loc = tree.tokenLocationPtr(0, first_token);
         const end_loc = tree.tokenLocationPtr(first_token.end, last_token);
         if (!color_on) {
src-self-hosted/ir.zig
@@ -961,6 +961,7 @@ pub const Code = struct {
     basic_block_list: std.ArrayList(*BasicBlock),
     arena: std.heap.ArenaAllocator,
     return_type: ?*Type,
+    tree_scope: *Scope.AstTree,
 
     /// allocator is comp.gpa()
     pub fn destroy(self: *Code, allocator: *Allocator) void {
@@ -990,14 +991,14 @@ pub const Code = struct {
                     return ret_value.val.KnownValue.getRef();
                 }
                 try comp.addCompileError(
-                    ret_value.scope.findRoot(),
+                    self.tree_scope,
                     ret_value.span,
                     "unable to evaluate constant expression",
                 );
                 return error.SemanticAnalysisFailed;
             } else if (inst.hasSideEffects()) {
                 try comp.addCompileError(
-                    inst.scope.findRoot(),
+                    self.tree_scope,
                     inst.span,
                     "unable to evaluate constant expression",
                 );
@@ -1013,25 +1014,24 @@ pub const Builder = struct {
     code: *Code,
     current_basic_block: *BasicBlock,
     next_debug_id: usize,
-    root_scope: *Scope.Root,
     is_comptime: bool,
     is_async: bool,
     begin_scope: ?*Scope,
 
     pub const Error = Analyze.Error;
 
-    pub fn init(comp: *Compilation, root_scope: *Scope.Root, begin_scope: ?*Scope) !Builder {
+    pub fn init(comp: *Compilation, tree_scope: *Scope.AstTree, begin_scope: ?*Scope) !Builder {
         const code = try comp.gpa().create(Code{
             .basic_block_list = undefined,
             .arena = std.heap.ArenaAllocator.init(comp.gpa()),
             .return_type = null,
+            .tree_scope = tree_scope,
         });
         code.basic_block_list = std.ArrayList(*BasicBlock).init(&code.arena.allocator);
         errdefer code.destroy(comp.gpa());
 
         return Builder{
             .comp = comp,
-            .root_scope = root_scope,
             .current_basic_block = undefined,
             .code = code,
             .next_debug_id = 0,
@@ -1292,6 +1292,7 @@ pub const Builder = struct {
                 Scope.Id.FnDef => return false,
                 Scope.Id.Decls => unreachable,
                 Scope.Id.Root => unreachable,
+                Scope.Id.AstTree => unreachable,
                 Scope.Id.Block,
                 Scope.Id.Defer,
                 Scope.Id.DeferExpr,
@@ -1302,7 +1303,7 @@ pub const Builder = struct {
     }
 
     pub fn genIntLit(irb: *Builder, int_lit: *ast.Node.IntegerLiteral, scope: *Scope) !*Inst {
-        const int_token = irb.root_scope.tree.tokenSlice(int_lit.token);
+        const int_token = irb.code.tree_scope.tree.tokenSlice(int_lit.token);
 
         var base: u8 = undefined;
         var rest: []const u8 = undefined;
@@ -1341,7 +1342,7 @@ pub const Builder = struct {
     }
 
     pub async fn genStrLit(irb: *Builder, str_lit: *ast.Node.StringLiteral, scope: *Scope) !*Inst {
-        const str_token = irb.root_scope.tree.tokenSlice(str_lit.token);
+        const str_token = irb.code.tree_scope.tree.tokenSlice(str_lit.token);
         const src_span = Span.token(str_lit.token);
 
         var bad_index: usize = undefined;
@@ -1349,7 +1350,7 @@ pub const Builder = struct {
             error.OutOfMemory => return error.OutOfMemory,
             error.InvalidCharacter => {
                 try irb.comp.addCompileError(
-                    irb.root_scope,
+                    irb.code.tree_scope,
                     src_span,
                     "invalid character in string literal: '{c}'",
                     str_token[bad_index],
@@ -1427,7 +1428,7 @@ pub const Builder = struct {
 
             if (statement_node.cast(ast.Node.Defer)) |defer_node| {
                 // defer starts a new scope
-                const defer_token = irb.root_scope.tree.tokens.at(defer_node.defer_token);
+                const defer_token = irb.code.tree_scope.tree.tokens.at(defer_node.defer_token);
                 const kind = switch (defer_token.id) {
                     Token.Id.Keyword_defer => Scope.Defer.Kind.ScopeExit,
                     Token.Id.Keyword_errdefer => Scope.Defer.Kind.ErrorExit,
@@ -1513,7 +1514,7 @@ pub const Builder = struct {
                 const src_span = Span.token(control_flow_expr.ltoken);
                 if (scope.findFnDef() == null) {
                     try irb.comp.addCompileError(
-                        irb.root_scope,
+                        irb.code.tree_scope,
                         src_span,
                         "return expression outside function definition",
                     );
@@ -1523,7 +1524,7 @@ pub const Builder = struct {
                 if (scope.findDeferExpr()) |scope_defer_expr| {
                     if (!scope_defer_expr.reported_err) {
                         try irb.comp.addCompileError(
-                            irb.root_scope,
+                            irb.code.tree_scope,
                             src_span,
                             "cannot return from defer expression",
                         );
@@ -1599,7 +1600,7 @@ pub const Builder = struct {
 
     pub async fn genIdentifier(irb: *Builder, identifier: *ast.Node.Identifier, scope: *Scope, lval: LVal) !*Inst {
         const src_span = Span.token(identifier.token);
-        const name = irb.root_scope.tree.tokenSlice(identifier.token);
+        const name = irb.code.tree_scope.tree.tokenSlice(identifier.token);
 
         //if (buf_eql_str(variable_name, "_") && lval == LValPtr) {
         //    IrInstructionConst *const_instruction = ir_build_instruction<IrInstructionConst>(irb, scope, node);
@@ -1622,7 +1623,7 @@ pub const Builder = struct {
             }
         } else |err| switch (err) {
             error.Overflow => {
-                try irb.comp.addCompileError(irb.root_scope, src_span, "integer too large");
+                try irb.comp.addCompileError(irb.code.tree_scope, src_span, "integer too large");
                 return error.SemanticAnalysisFailed;
             },
             error.OutOfMemory => return error.OutOfMemory,
@@ -1656,7 +1657,7 @@ pub const Builder = struct {
         // TODO put a variable of same name with invalid type in global scope
         // so that future references to this same name will find a variable with an invalid type
 
-        try irb.comp.addCompileError(irb.root_scope, src_span, "unknown identifier '{}'", name);
+        try irb.comp.addCompileError(irb.code.tree_scope, src_span, "unknown identifier '{}'", name);
         return error.SemanticAnalysisFailed;
     }
 
@@ -1689,6 +1690,7 @@ pub const Builder = struct {
                 => scope = scope.parent orelse break,
 
                 Scope.Id.DeferExpr => unreachable,
+                Scope.Id.AstTree => unreachable,
             }
         }
         return result;
@@ -1740,6 +1742,7 @@ pub const Builder = struct {
                 => scope = scope.parent orelse return is_noreturn,
 
                 Scope.Id.DeferExpr => unreachable,
+                Scope.Id.AstTree => unreachable,
             }
         }
     }
@@ -1968,8 +1971,8 @@ const Analyze = struct {
         OutOfMemory,
     };
 
-    pub fn init(comp: *Compilation, root_scope: *Scope.Root, explicit_return_type: ?*Type) !Analyze {
-        var irb = try Builder.init(comp, root_scope, null);
+    pub fn init(comp: *Compilation, tree_scope: *Scope.AstTree, explicit_return_type: ?*Type) !Analyze {
+        var irb = try Builder.init(comp, tree_scope, null);
         errdefer irb.abort();
 
         return Analyze{
@@ -2047,7 +2050,7 @@ const Analyze = struct {
     }
 
     fn addCompileError(self: *Analyze, span: Span, comptime fmt: []const u8, args: ...) !void {
-        return self.irb.comp.addCompileError(self.irb.root_scope, span, fmt, args);
+        return self.irb.comp.addCompileError(self.irb.code.tree_scope, span, fmt, args);
     }
 
     fn resolvePeerTypes(self: *Analyze, expected_type: ?*Type, peers: []const *Inst) Analyze.Error!*Type {
@@ -2535,9 +2538,10 @@ const Analyze = struct {
 pub async fn gen(
     comp: *Compilation,
     body_node: *ast.Node,
+    tree_scope: *Scope.AstTree,
     scope: *Scope,
 ) !*Code {
-    var irb = try Builder.init(comp, scope.findRoot(), scope);
+    var irb = try Builder.init(comp, tree_scope, scope);
     errdefer irb.abort();
 
     const entry_block = try irb.createBasicBlock(scope, c"Entry");
@@ -2555,9 +2559,8 @@ pub async fn gen(
 
 pub async fn analyze(comp: *Compilation, old_code: *Code, expected_type: ?*Type) !*Code {
     const old_entry_bb = old_code.basic_block_list.at(0);
-    const root_scope = old_entry_bb.scope.findRoot();
 
-    var ira = try Analyze.init(comp, root_scope, expected_type);
+    var ira = try Analyze.init(comp, old_code.tree_scope, expected_type);
     errdefer ira.abort();
 
     const new_entry_bb = try ira.getNewBasicBlock(old_entry_bb, null);
src-self-hosted/main.zig
@@ -24,6 +24,8 @@ var stderr_file: os.File = undefined;
 var stderr: *io.OutStream(io.FileOutStream.Error) = undefined;
 var stdout: *io.OutStream(io.FileOutStream.Error) = undefined;
 
+const max_src_size = 2 * 1024 * 1024 * 1024; // 2 GiB
+
 const usage =
     \\usage: zig [command] [options]
     \\
@@ -71,26 +73,26 @@ pub fn main() !void {
     }
 
     const commands = []Command{
-        //Command{
-        //    .name = "build-exe",
-        //    .exec = cmdBuildExe,
-        //},
-        //Command{
-        //    .name = "build-lib",
-        //    .exec = cmdBuildLib,
-        //},
-        //Command{
-        //    .name = "build-obj",
-        //    .exec = cmdBuildObj,
-        //},
+        Command{
+            .name = "build-exe",
+            .exec = cmdBuildExe,
+        },
+        Command{
+            .name = "build-lib",
+            .exec = cmdBuildLib,
+        },
+        Command{
+            .name = "build-obj",
+            .exec = cmdBuildObj,
+        },
         Command{
             .name = "fmt",
             .exec = cmdFmt,
         },
-        //Command{
-        //    .name = "libc",
-        //    .exec = cmdLibC,
-        //},
+        Command{
+            .name = "libc",
+            .exec = cmdLibC,
+        },
         Command{
             .name = "targets",
             .exec = cmdTargets,
@@ -472,16 +474,21 @@ fn buildOutputType(allocator: *Allocator, args: []const []const u8, out_type: Co
 }
 
 async fn processBuildEvents(comp: *Compilation, color: errmsg.Color) void {
+    var count: usize = 0;
     while (true) {
         // TODO directly awaiting async should guarantee memory allocation elision
         const build_event = await (async comp.events.get() catch unreachable);
+        count += 1;
 
         switch (build_event) {
-            Compilation.Event.Ok => {},
+            Compilation.Event.Ok => {
+                stderr.print("Build {} succeeded\n", count) catch os.exit(1);
+            },
             Compilation.Event.Error => |err| {
-                stderr.print("build failed: {}\n", @errorName(err)) catch os.exit(1);
+                stderr.print("Build {} failed: {}\n", count, @errorName(err)) catch os.exit(1);
             },
             Compilation.Event.Fail => |msgs| {
+                stderr.print("Build {} compile errors:\n", count) catch os.exit(1);
                 for (msgs) |msg| {
                     defer msg.destroy();
                     msg.printToFile(&stderr_file, color) catch os.exit(1);
@@ -614,7 +621,7 @@ fn cmdFmt(allocator: *Allocator, args: []const []const u8) !void {
         var stdin_file = try io.getStdIn();
         var stdin = io.FileInStream.init(&stdin_file);
 
-        const source_code = try stdin.stream.readAllAlloc(allocator, @maxValue(usize));
+        const source_code = try stdin.stream.readAllAlloc(allocator, max_src_size);
         defer allocator.free(source_code);
 
         var tree = std.zig.parse(allocator, source_code) catch |err| {
@@ -697,12 +704,6 @@ async fn asyncFmtMain(
     suspend {
         resume @handle();
     }
-    // Things we need to make event-based:
-    // * opening the file in the first place - the open()
-    // * read()
-    // * readdir()
-    // * the actual parsing and rendering
-    // * rename()
     var fmt = Fmt{
         .seen = event.Locked(Fmt.SeenMap).init(loop, Fmt.SeenMap.init(loop.allocator)),
         .any_error = false,
@@ -714,7 +715,10 @@ async fn asyncFmtMain(
     for (flags.positionals.toSliceConst()) |file_path| {
         try group.call(fmtPath, &fmt, file_path);
     }
-    return await (async group.wait() catch unreachable);
+    try await (async group.wait() catch unreachable);
+    if (fmt.any_error) {
+        os.exit(1);
+    }
 }
 
 async fn fmtPath(fmt: *Fmt, file_path_ref: []const u8) FmtError!void {
@@ -731,9 +735,10 @@ async fn fmtPath(fmt: *Fmt, file_path_ref: []const u8) FmtError!void {
     const source_code = (await try async event.fs.readFile(
         fmt.loop,
         file_path,
-        2 * 1024 * 1024 * 1024,
+        max_src_size,
     )) catch |err| switch (err) {
         error.IsDir => {
+            // TODO make event based (and dir.next())
             var dir = try std.os.Dir.open(fmt.loop.allocator, file_path);
             defer dir.close();
 
@@ -774,6 +779,7 @@ async fn fmtPath(fmt: *Fmt, file_path_ref: []const u8) FmtError!void {
         return;
     }
 
+    // TODO make this evented
     const baf = try io.BufferedAtomicFile.create(fmt.loop.allocator, file_path);
     defer baf.destroy();
 
src-self-hosted/scope.zig
@@ -63,6 +63,8 @@ pub const Scope = struct {
                 Id.CompTime,
                 Id.Var,
                 => scope = scope.parent.?,
+
+                Id.AstTree => unreachable,
             }
         }
     }
@@ -83,6 +85,8 @@ pub const Scope = struct {
                 Id.Root,
                 Id.Var,
                 => scope = scope.parent orelse return null,
+
+                Id.AstTree => unreachable,
             }
         }
     }
@@ -132,6 +136,7 @@ pub const Scope = struct {
         }
 
         pub fn destroy(self: *Root, comp: *Compilation) void {
+            // TODO comp.fs_watch.removeFile(self.realpath);
             self.decls.base.deref(comp);
             comp.gpa().free(self.realpath);
             comp.gpa().destroy(self);
@@ -144,13 +149,13 @@ pub const Scope = struct {
 
         /// Creates a scope with 1 reference
         /// Takes ownership of tree, will deinit and destroy when done.
-        pub fn create(comp: *Compilation, tree: *ast.Tree, root: *Root) !*AstTree {
-            const self = try comp.gpa().createOne(Root);
+        pub fn create(comp: *Compilation, tree: *ast.Tree, root_scope: *Root) !*AstTree {
+            const self = try comp.gpa().createOne(AstTree);
             self.* = AstTree{
                 .base = undefined,
                 .tree = tree,
             };
-            self.base.init(Id.AstTree, &root.base);
+            self.base.init(Id.AstTree, &root_scope.base);
 
             return self;
         }
@@ -181,7 +186,6 @@ pub const Scope = struct {
             self.* = Decls{
                 .base = undefined,
                 .table = event.RwLocked(Decl.Table).init(comp.loop, Decl.Table.init(comp.gpa())),
-                .name_future = event.Future(void).init(comp.loop),
             };
             self.base.init(Id.Decls, parent);
             return self;
src-self-hosted/test.zig
@@ -212,9 +212,10 @@ pub const TestContext = struct {
             Compilation.Event.Fail => |msgs| {
                 assertOrPanic(msgs.len != 0);
                 for (msgs) |msg| {
-                    if (mem.endsWith(u8, msg.getRealPath(), path) and mem.eql(u8, msg.text, text)) {
-                        const first_token = msg.getTree().tokens.at(msg.span.first);
-                        const last_token = msg.getTree().tokens.at(msg.span.first);
+                    if (mem.endsWith(u8, msg.realpath, path) and mem.eql(u8, msg.text, text)) {
+                        const span = msg.getSpan();
+                        const first_token = msg.getTree().tokens.at(span.first);
+                        const last_token = msg.getTree().tokens.at(span.last);
                         const start_loc = msg.getTree().tokenLocationPtr(0, first_token);
                         if (start_loc.line + 1 == line and start_loc.column + 1 == column) {
                             return;
std/event/fs.zig
@@ -367,109 +367,193 @@ pub async fn readFile(loop: *event.Loop, file_path: []const u8, max_size: usize)
     }
 }
 
-pub const Watch = struct {
-    channel: *event.Channel(Event),
-    putter: promise,
-
-    pub const Event = union(enum) {
-        CloseWrite,
-        Err: Error,
-    };
-
-    pub const Error = error{
-        UserResourceLimitReached,
-        SystemResources,
-    };
-
-    pub fn destroy(self: *Watch) void {
-        // TODO https://github.com/ziglang/zig/issues/1261
-        cancel self.putter;
-    }
-};
-
-pub fn watchFile(loop: *event.Loop, file_path: []const u8) !*Watch {
-    const path_with_null = try std.cstr.addNullByte(loop.allocator, file_path);
-    defer loop.allocator.free(path_with_null);
+pub fn Watch(comptime V: type) type {
+    return struct {
+        channel: *event.Channel(Event),
+        putter: promise,
+        wd_table: WdTable,
+        table_lock: event.Lock,
+        inotify_fd: i32,
+
+        const WdTable = std.AutoHashMap(i32, Dir);
+        const FileTable = std.AutoHashMap([]const u8, V);
+
+        const Self = this;
+
+        const Dir = struct {
+            dirname: []const u8,
+            file_table: FileTable,
+        };
 
-    const inotify_fd = try os.linuxINotifyInit1(os.linux.IN_NONBLOCK | os.linux.IN_CLOEXEC);
-    errdefer os.close(inotify_fd);
+        pub const Event = union(enum) {
+            CloseWrite: V,
+            Err: Error,
 
-    const wd = try os.linuxINotifyAddWatchC(inotify_fd, path_with_null.ptr, os.linux.IN_CLOSE_WRITE);
-    errdefer os.close(wd);
+            pub const Error = error{
+                UserResourceLimitReached,
+                SystemResources,
+            };
+        };
 
-    const channel = try event.Channel(Watch.Event).create(loop, 0);
-    errdefer channel.destroy();
+        pub fn create(loop: *event.Loop, event_buf_count: usize) !*Self {
+            const inotify_fd = try os.linuxINotifyInit1(os.linux.IN_NONBLOCK | os.linux.IN_CLOEXEC);
+            errdefer os.close(inotify_fd);
 
-    var result: *Watch = undefined;
-    _ = try async<loop.allocator> watchEventPutter(inotify_fd, wd, channel, &result);
-    return result;
-}
-
-async fn watchEventPutter(inotify_fd: i32, wd: i32, channel: *event.Channel(Watch.Event), out_watch: **Watch) void {
-    // TODO https://github.com/ziglang/zig/issues/1194
-    suspend {
-        resume @handle();
-    }
+            const channel = try event.Channel(Self.Event).create(loop, event_buf_count);
+            errdefer channel.destroy();
 
-    var watch = Watch{
-        .putter = @handle(),
-        .channel = channel,
-    };
-    out_watch.* = &watch;
+            var result: *Self = undefined;
+            _ = try async<loop.allocator> eventPutter(inotify_fd, channel, &result);
+            return result;
+        }
 
-    const loop = channel.loop;
-    loop.beginOneEvent();
+        pub fn destroy(self: *Self) void {
+            cancel self.putter;
+        }
 
-    defer {
-        channel.destroy();
-        os.close(wd);
-        os.close(inotify_fd);
-        loop.finishOneEvent();
-    }
+        pub async fn addFile(self: *Self, file_path: []const u8, value: V) !?V {
+            const dirname = os.path.dirname(file_path) orelse ".";
+            const dirname_with_null = try std.cstr.addNullByte(self.channel.loop.allocator, dirname);
+            var dirname_with_null_consumed = false;
+            defer if (!dirname_with_null_consumed) self.channel.loop.allocator.free(dirname_with_null);
+
+            const basename = os.path.basename(file_path);
+            const basename_with_null = try std.cstr.addNullByte(self.channel.loop.allocator, basename);
+            var basename_with_null_consumed = false;
+            defer if (!basename_with_null_consumed) self.channel.loop.allocator.free(basename_with_null);
+
+            const wd = try os.linuxINotifyAddWatchC(
+                self.inotify_fd,
+                dirname_with_null.ptr,
+                os.linux.IN_CLOSE_WRITE | os.linux.IN_ONLYDIR | os.linux.IN_EXCL_UNLINK,
+            );
+            // wd is either a newly created watch or an existing one.
+
+            const held = await (async self.table_lock.acquire() catch unreachable);
+            defer held.release();
+
+            const gop = try self.wd_table.getOrPut(wd);
+            if (!gop.found_existing) {
+                gop.kv.value = Dir{
+                    .dirname = dirname_with_null,
+                    .file_table = FileTable.init(self.channel.loop.allocator),
+                };
+                dirname_with_null_consumed = true;
+            }
+            const dir = &gop.kv.value;
+
+            const file_table_gop = try dir.file_table.getOrPut(basename_with_null);
+            if (file_table_gop.found_existing) {
+                const prev_value = file_table_gop.kv.value;
+                file_table_gop.kv.value = value;
+                return prev_value;
+            } else {
+                file_table_gop.kv.value = value;
+                basename_with_null_consumed = true;
+                return null;
+            }
+        }
 
-    var event_buf: [4096]u8 align(@alignOf(os.linux.inotify_event)) = undefined;
+        pub async fn removeFile(self: *Self, file_path: []const u8) ?V {
+            @panic("TODO");
+        }
 
-    while (true) {
-        const rc = os.linux.read(inotify_fd, &event_buf, event_buf.len);
-        const errno = os.linux.getErrno(rc);
-        switch (errno) {
-            0 => {
-                // can't use @bytesToSlice because of the special variable length name field
-                var ptr = event_buf[0..].ptr;
-                const end_ptr = ptr + event_buf.len;
-                var ev: *os.linux.inotify_event = undefined;
-                while (@ptrToInt(ptr) < @ptrToInt(end_ptr)) : (ptr += @sizeOf(os.linux.inotify_event) + ev.len) {
-                    ev = @ptrCast(*os.linux.inotify_event, ptr);
-                    if (ev.mask & os.linux.IN_CLOSE_WRITE == os.linux.IN_CLOSE_WRITE) {
-                        await (async channel.put(Watch.Event.CloseWrite) catch unreachable);
+        async fn eventPutter(inotify_fd: i32, channel: *event.Channel(Event), out_watch: **Self) void {
+            // TODO https://github.com/ziglang/zig/issues/1194
+            suspend {
+                resume @handle();
+            }
+
+            const loop = channel.loop;
+
+            var watch = Self{
+                .putter = @handle(),
+                .channel = channel,
+                .wd_table = WdTable.init(loop.allocator),
+                .table_lock = event.Lock.init(loop),
+                .inotify_fd = inotify_fd,
+            };
+            out_watch.* = &watch;
+
+            loop.beginOneEvent();
+
+            defer {
+                watch.table_lock.deinit();
+                {
+                    var wd_it = watch.wd_table.iterator();
+                    while (wd_it.next()) |wd_entry| {
+                        var file_it = wd_entry.value.file_table.iterator();
+                        while (file_it.next()) |file_entry| {
+                            loop.allocator.free(file_entry.key);
+                        }
+                        loop.allocator.free(wd_entry.value.dirname);
                     }
                 }
-            },
-            os.linux.EINTR => continue,
-            os.linux.EINVAL => unreachable,
-            os.linux.EFAULT => unreachable,
-            os.linux.EAGAIN => {
-                (await (async loop.linuxWaitFd(
-                    inotify_fd,
-                    os.linux.EPOLLET | os.linux.EPOLLIN,
-                ) catch unreachable)) catch |err| {
-                    const transformed_err = switch (err) {
-                        error.InvalidFileDescriptor => unreachable,
-                        error.FileDescriptorAlreadyPresentInSet => unreachable,
-                        error.InvalidSyscall => unreachable,
-                        error.OperationCausesCircularLoop => unreachable,
-                        error.FileDescriptorNotRegistered => unreachable,
-                        error.SystemResources => error.SystemResources,
-                        error.UserResourceLimitReached => error.UserResourceLimitReached,
-                        error.FileDescriptorIncompatibleWithEpoll => unreachable,
-                        error.Unexpected => unreachable,
-                    };
-                    await (async channel.put(Watch.Event{ .Err = transformed_err }) catch unreachable);
-                };
-            },
-            else => unreachable,
+                loop.finishOneEvent();
+                os.close(inotify_fd);
+                channel.destroy();
+            }
+
+            var event_buf: [4096]u8 align(@alignOf(os.linux.inotify_event)) = undefined;
+
+            while (true) {
+                const rc = os.linux.read(inotify_fd, &event_buf, event_buf.len);
+                const errno = os.linux.getErrno(rc);
+                switch (errno) {
+                    0 => {
+                        // can't use @bytesToSlice because of the special variable length name field
+                        var ptr = event_buf[0..].ptr;
+                        const end_ptr = ptr + rc;
+                        var ev: *os.linux.inotify_event = undefined;
+                        while (@ptrToInt(ptr) < @ptrToInt(end_ptr)) : (ptr += @sizeOf(os.linux.inotify_event) + ev.len) {
+                            ev = @ptrCast(*os.linux.inotify_event, ptr);
+                            if (ev.mask & os.linux.IN_CLOSE_WRITE == os.linux.IN_CLOSE_WRITE) {
+                                const basename_ptr = ptr + @sizeOf(os.linux.inotify_event);
+                                const basename_with_null = basename_ptr[0 .. std.cstr.len(basename_ptr) + 1];
+                                const user_value = blk: {
+                                    const held = await (async watch.table_lock.acquire() catch unreachable);
+                                    defer held.release();
+
+                                    const dir = &watch.wd_table.get(ev.wd).?.value;
+                                    if (dir.file_table.get(basename_with_null)) |entry| {
+                                        break :blk entry.value;
+                                    } else {
+                                        break :blk null;
+                                    }
+                                };
+                                if (user_value) |v| {
+                                    await (async channel.put(Self.Event{ .CloseWrite = v }) catch unreachable);
+                                }
+                            }
+                        }
+                    },
+                    os.linux.EINTR => continue,
+                    os.linux.EINVAL => unreachable,
+                    os.linux.EFAULT => unreachable,
+                    os.linux.EAGAIN => {
+                        (await (async loop.linuxWaitFd(
+                            inotify_fd,
+                            os.linux.EPOLLET | os.linux.EPOLLIN,
+                        ) catch unreachable)) catch |err| {
+                            const transformed_err = switch (err) {
+                                error.InvalidFileDescriptor => unreachable,
+                                error.FileDescriptorAlreadyPresentInSet => unreachable,
+                                error.InvalidSyscall => unreachable,
+                                error.OperationCausesCircularLoop => unreachable,
+                                error.FileDescriptorNotRegistered => unreachable,
+                                error.SystemResources => error.SystemResources,
+                                error.UserResourceLimitReached => error.UserResourceLimitReached,
+                                error.FileDescriptorIncompatibleWithEpoll => unreachable,
+                                error.Unexpected => unreachable,
+                            };
+                            await (async channel.put(Self.Event{ .Err = transformed_err }) catch unreachable);
+                        };
+                    },
+                    else => unreachable,
+                }
+            }
         }
-    }
+    };
 }
 
 const test_tmp_dir = "std_event_fs_test";
@@ -517,9 +601,11 @@ async fn testFsWatch(loop: *event.Loop) !void {
     assert(mem.eql(u8, read_contents, contents));
 
     // now watch the file
-    var watch = try watchFile(loop, file_path);
+    var watch = try Watch(void).create(loop, 0);
     defer watch.destroy();
 
+    assert((try await try async watch.addFile(file_path, {})) == null);
+
     const ev = try async watch.channel.get();
     var ev_consumed = false;
     defer if (!ev_consumed) cancel ev;
@@ -534,8 +620,8 @@ async fn testFsWatch(loop: *event.Loop) !void {
 
     ev_consumed = true;
     switch (await ev) {
-        Watch.Event.CloseWrite => {},
-        Watch.Event.Err => |err| return err,
+        Watch(void).Event.CloseWrite => {},
+        Watch(void).Event.Err => |err| return err,
     }
 
     const contents_updated = try await try async readFile(loop, file_path, 1024 * 1024);
std/event/rwlock.zig
@@ -10,6 +10,8 @@ const Loop = std.event.Loop;
 /// Does not make any syscalls - coroutines which are waiting for the lock are suspended, and
 /// are resumed when the lock is released, in order.
 /// Many readers can hold the lock at the same time; however locking for writing is exclusive.
+/// When a read lock is held, it will not be released until the reader queue is empty.
+/// When a write lock is held, it will not be released until the writer queue is empty.
 pub const RwLock = struct {
     loop: *Loop,
     shared_state: u8, // TODO make this an enum
std/special/build_runner.zig
@@ -72,10 +72,10 @@ pub fn main() !void {
             if (mem.indexOfScalar(u8, option_contents, '=')) |name_end| {
                 const option_name = option_contents[0..name_end];
                 const option_value = option_contents[name_end + 1 ..];
-                if (builder.addUserInputOption(option_name, option_value))
+                if (try builder.addUserInputOption(option_name, option_value))
                     return usageAndErr(&builder, false, try stderr_stream);
             } else {
-                if (builder.addUserInputFlag(option_contents))
+                if (try builder.addUserInputFlag(option_contents))
                     return usageAndErr(&builder, false, try stderr_stream);
             }
         } else if (mem.startsWith(u8, arg, "-")) {
std/build.zig
@@ -424,60 +424,69 @@ pub const Builder = struct {
         return mode;
     }
 
-    pub fn addUserInputOption(self: *Builder, name: []const u8, value: []const u8) bool {
-        if (self.user_input_options.put(name, UserInputOption{
-            .name = name,
-            .value = UserValue{ .Scalar = value },
-            .used = false,
-        }) catch unreachable) |*prev_value| {
-            // option already exists
-            switch (prev_value.value) {
-                UserValue.Scalar => |s| {
-                    // turn it into a list
-                    var list = ArrayList([]const u8).init(self.allocator);
-                    list.append(s) catch unreachable;
-                    list.append(value) catch unreachable;
-                    _ = self.user_input_options.put(name, UserInputOption{
-                        .name = name,
-                        .value = UserValue{ .List = list },
-                        .used = false,
-                    }) catch unreachable;
-                },
-                UserValue.List => |*list| {
-                    // append to the list
-                    list.append(value) catch unreachable;
-                    _ = self.user_input_options.put(name, UserInputOption{
-                        .name = name,
-                        .value = UserValue{ .List = list.* },
-                        .used = false,
-                    }) catch unreachable;
-                },
-                UserValue.Flag => {
-                    warn("Option '-D{}={}' conflicts with flag '-D{}'.\n", name, value, name);
-                    return true;
-                },
-            }
+    pub fn addUserInputOption(self: *Builder, name: []const u8, value: []const u8) !bool {
+        const gop = try self.user_input_options.getOrPut(name);
+        if (!gop.found_existing) {
+            gop.kv.value = UserInputOption{
+                .name = name,
+                .value = UserValue{ .Scalar = value },
+                .used = false,
+            };
+            return false;
+        }
+
+        // option already exists
+        switch (gop.kv.value.value) {
+            UserValue.Scalar => |s| {
+                // turn it into a list
+                var list = ArrayList([]const u8).init(self.allocator);
+                list.append(s) catch unreachable;
+                list.append(value) catch unreachable;
+                _ = self.user_input_options.put(name, UserInputOption{
+                    .name = name,
+                    .value = UserValue{ .List = list },
+                    .used = false,
+                }) catch unreachable;
+            },
+            UserValue.List => |*list| {
+                // append to the list
+                list.append(value) catch unreachable;
+                _ = self.user_input_options.put(name, UserInputOption{
+                    .name = name,
+                    .value = UserValue{ .List = list.* },
+                    .used = false,
+                }) catch unreachable;
+            },
+            UserValue.Flag => {
+                warn("Option '-D{}={}' conflicts with flag '-D{}'.\n", name, value, name);
+                return true;
+            },
         }
         return false;
     }
 
-    pub fn addUserInputFlag(self: *Builder, name: []const u8) bool {
-        if (self.user_input_options.put(name, UserInputOption{
-            .name = name,
-            .value = UserValue{ .Flag = {} },
-            .used = false,
-        }) catch unreachable) |*prev_value| {
-            switch (prev_value.value) {
-                UserValue.Scalar => |s| {
-                    warn("Flag '-D{}' conflicts with option '-D{}={}'.\n", name, name, s);
-                    return true;
-                },
-                UserValue.List => {
-                    warn("Flag '-D{}' conflicts with multiple options of the same name.\n", name);
-                    return true;
-                },
-                UserValue.Flag => {},
-            }
+    pub fn addUserInputFlag(self: *Builder, name: []const u8) !bool {
+        const gop = try self.user_input_options.getOrPut(name);
+        if (!gop.found_existing) {
+            gop.kv.value = UserInputOption{
+                .name = name,
+                .value = UserValue{ .Flag = {} },
+                .used = false,
+            };
+            return false;
+        }
+
+        // option already exists
+        switch (gop.kv.value.value) {
+            UserValue.Scalar => |s| {
+                warn("Flag '-D{}' conflicts with option '-D{}={}'.\n", name, name, s);
+                return true;
+            },
+            UserValue.List => {
+                warn("Flag '-D{}' conflicts with multiple options of the same name.\n", name);
+                return true;
+            },
+            UserValue.Flag => {},
         }
         return false;
     }
std/hash_map.zig
@@ -9,6 +9,10 @@ const builtin = @import("builtin");
 const want_modification_safety = builtin.mode != builtin.Mode.ReleaseFast;
 const debug_u32 = if (want_modification_safety) u32 else void;
 
+pub fn AutoHashMap(comptime K: type, comptime V: type) type {
+    return HashMap(K, V, getAutoHashFn(K), getAutoEqlFn(K));
+}
+
 pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u32, comptime eql: fn (a: K, b: K) bool) type {
     return struct {
         entries: []Entry,
@@ -20,13 +24,22 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
 
         const Self = this;
 
-        pub const Entry = struct {
-            used: bool,
-            distance_from_start_index: usize,
+        pub const KV = struct {
             key: K,
             value: V,
         };
 
+        const Entry = struct {
+            used: bool,
+            distance_from_start_index: usize,
+            kv: KV,
+        };
+
+        pub const GetOrPutResult = struct {
+            kv: *KV,
+            found_existing: bool,
+        };
+
         pub const Iterator = struct {
             hm: *const Self,
             // how many items have we returned
@@ -36,7 +49,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
             // used to detect concurrent modification
             initial_modification_count: debug_u32,
 
-            pub fn next(it: *Iterator) ?*Entry {
+            pub fn next(it: *Iterator) ?*KV {
                 if (want_modification_safety) {
                     assert(it.initial_modification_count == it.hm.modification_count); // concurrent modification
                 }
@@ -46,7 +59,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
                     if (entry.used) {
                         it.index += 1;
                         it.count += 1;
-                        return entry;
+                        return &entry.kv;
                     }
                 }
                 unreachable; // no next item
@@ -71,7 +84,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
             };
         }
 
-        pub fn deinit(hm: *const Self) void {
+        pub fn deinit(hm: Self) void {
             hm.allocator.free(hm.entries);
         }
 
@@ -84,34 +97,65 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
             hm.incrementModificationCount();
         }
 
-        pub fn count(hm: *const Self) usize {
-            return hm.size;
+        pub fn count(self: Self) usize {
+            return self.size;
         }
 
-        /// Returns the value that was already there.
-        pub fn put(hm: *Self, key: K, value: *const V) !?V {
-            if (hm.entries.len == 0) {
-                try hm.initCapacity(16);
+        /// If key exists this function cannot fail.
+        /// If there is an existing item with `key`, then the result
+        /// kv pointer points to it, and found_existing is true.
+        /// Otherwise, puts a new item with undefined value, and
+        /// the kv pointer points to it. Caller should then initialize
+        /// the data.
+        pub fn getOrPut(self: *Self, key: K) !GetOrPutResult {
+            // TODO this implementation can be improved - we should only
+            // have to hash once and find the entry once.
+            if (self.get(key)) |kv| {
+                return GetOrPutResult{
+                    .kv = kv,
+                    .found_existing = true,
+                };
+            }
+            self.incrementModificationCount();
+            try self.ensureCapacity();
+            const put_result = self.internalPut(key);
+            assert(put_result.old_kv == null);
+            return GetOrPutResult{
+                .kv = &put_result.new_entry.kv,
+                .found_existing = false,
+            };
+        }
+
+        fn ensureCapacity(self: *Self) !void {
+            if (self.entries.len == 0) {
+                return self.initCapacity(16);
             }
-            hm.incrementModificationCount();
 
             // if we get too full (60%), double the capacity
-            if (hm.size * 5 >= hm.entries.len * 3) {
-                const old_entries = hm.entries;
-                try hm.initCapacity(hm.entries.len * 2);
+            if (self.size * 5 >= self.entries.len * 3) {
+                const old_entries = self.entries;
+                try self.initCapacity(self.entries.len * 2);
                 // dump all of the old elements into the new table
                 for (old_entries) |*old_entry| {
                     if (old_entry.used) {
-                        _ = hm.internalPut(old_entry.key, old_entry.value);
+                        self.internalPut(old_entry.kv.key).new_entry.kv.value = old_entry.kv.value;
                     }
                 }
-                hm.allocator.free(old_entries);
+                self.allocator.free(old_entries);
             }
+        }
 
-            return hm.internalPut(key, value);
+        /// Returns the kv pair that was already there.
+        pub fn put(self: *Self, key: K, value: V) !?KV {
+            self.incrementModificationCount();
+            try self.ensureCapacity();
+
+            const put_result = self.internalPut(key);
+            put_result.new_entry.kv.value = value;
+            return put_result.old_kv;
         }
 
-        pub fn get(hm: *const Self, key: K) ?*Entry {
+        pub fn get(hm: *const Self, key: K) ?*KV {
             if (hm.entries.len == 0) {
                 return null;
             }
@@ -122,7 +166,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
             return hm.get(key) != null;
         }
 
-        pub fn remove(hm: *Self, key: K) ?*Entry {
+        pub fn remove(hm: *Self, key: K) ?*KV {
             if (hm.entries.len == 0) return null;
             hm.incrementModificationCount();
             const start_index = hm.keyToIndex(key);
@@ -134,7 +178,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
 
                     if (!entry.used) return null;
 
-                    if (!eql(entry.key, key)) continue;
+                    if (!eql(entry.kv.key, key)) continue;
 
                     while (roll_over < hm.entries.len) : (roll_over += 1) {
                         const next_index = (start_index + roll_over + 1) % hm.entries.len;
@@ -142,7 +186,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
                         if (!next_entry.used or next_entry.distance_from_start_index == 0) {
                             entry.used = false;
                             hm.size -= 1;
-                            return entry;
+                            return &entry.kv;
                         }
                         entry.* = next_entry.*;
                         entry.distance_from_start_index -= 1;
@@ -168,7 +212,7 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
             try other.initCapacity(self.entries.len);
             var it = self.iterator();
             while (it.next()) |entry| {
-                try other.put(entry.key, entry.value);
+                assert((try other.put(entry.key, entry.value)) == null);
             }
             return other;
         }
@@ -188,60 +232,81 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
             }
         }
 
-        /// Returns the value that was already there.
-        fn internalPut(hm: *Self, orig_key: K, orig_value: *const V) ?V {
+        const InternalPutResult = struct {
+            new_entry: *Entry,
+            old_kv: ?KV,
+        };
+
+        /// Returns a pointer to the new entry.
+        /// Asserts that there is enough space for the new item.
+        fn internalPut(self: *Self, orig_key: K) InternalPutResult {
             var key = orig_key;
-            var value = orig_value.*;
-            const start_index = hm.keyToIndex(key);
+            var value: V = undefined;
+            const start_index = self.keyToIndex(key);
             var roll_over: usize = 0;
             var distance_from_start_index: usize = 0;
-            while (roll_over < hm.entries.len) : ({
+            var got_result_entry = false;
+            var result = InternalPutResult{
+                .new_entry = undefined,
+                .old_kv = null,
+            };
+            while (roll_over < self.entries.len) : ({
                 roll_over += 1;
                 distance_from_start_index += 1;
             }) {
-                const index = (start_index + roll_over) % hm.entries.len;
-                const entry = &hm.entries[index];
+                const index = (start_index + roll_over) % self.entries.len;
+                const entry = &self.entries[index];
 
-                if (entry.used and !eql(entry.key, key)) {
+                if (entry.used and !eql(entry.kv.key, key)) {
                     if (entry.distance_from_start_index < distance_from_start_index) {
                         // robin hood to the rescue
                         const tmp = entry.*;
-                        hm.max_distance_from_start_index = math.max(hm.max_distance_from_start_index, distance_from_start_index);
+                        self.max_distance_from_start_index = math.max(self.max_distance_from_start_index, distance_from_start_index);
+                        if (!got_result_entry) {
+                            got_result_entry = true;
+                            result.new_entry = entry;
+                        }
                         entry.* = Entry{
                             .used = true,
                             .distance_from_start_index = distance_from_start_index,
-                            .key = key,
-                            .value = value,
+                            .kv = KV{
+                                .key = key,
+                                .value = value,
+                            },
                         };
-                        key = tmp.key;
-                        value = tmp.value;
+                        key = tmp.kv.key;
+                        value = tmp.kv.value;
                         distance_from_start_index = tmp.distance_from_start_index;
                     }
                     continue;
                 }
 
-                var result: ?V = null;
                 if (entry.used) {
-                    result = entry.value;
+                    result.old_kv = entry.kv;
                 } else {
                     // adding an entry. otherwise overwriting old value with
                     // same key
-                    hm.size += 1;
+                    self.size += 1;
                 }
 
-                hm.max_distance_from_start_index = math.max(distance_from_start_index, hm.max_distance_from_start_index);
+                self.max_distance_from_start_index = math.max(distance_from_start_index, self.max_distance_from_start_index);
+                if (!got_result_entry) {
+                    result.new_entry = entry;
+                }
                 entry.* = Entry{
                     .used = true,
                     .distance_from_start_index = distance_from_start_index,
-                    .key = key,
-                    .value = value,
+                    .kv = KV{
+                        .key = key,
+                        .value = value,
+                    },
                 };
                 return result;
             }
             unreachable; // put into a full map
         }
 
-        fn internalGet(hm: *const Self, key: K) ?*Entry {
+        fn internalGet(hm: Self, key: K) ?*KV {
             const start_index = hm.keyToIndex(key);
             {
                 var roll_over: usize = 0;
@@ -250,13 +315,13 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
                     const entry = &hm.entries[index];
 
                     if (!entry.used) return null;
-                    if (eql(entry.key, key)) return entry;
+                    if (eql(entry.kv.key, key)) return &entry.kv;
                 }
             }
             return null;
         }
 
-        fn keyToIndex(hm: *const Self, key: K) usize {
+        fn keyToIndex(hm: Self, key: K) usize {
             return usize(hash(key)) % hm.entries.len;
         }
     };
@@ -266,7 +331,7 @@ test "basic hash map usage" {
     var direct_allocator = std.heap.DirectAllocator.init();
     defer direct_allocator.deinit();
 
-    var map = HashMap(i32, i32, hash_i32, eql_i32).init(&direct_allocator.allocator);
+    var map = AutoHashMap(i32, i32).init(&direct_allocator.allocator);
     defer map.deinit();
 
     assert((try map.put(1, 11)) == null);
@@ -275,8 +340,19 @@ test "basic hash map usage" {
     assert((try map.put(4, 44)) == null);
     assert((try map.put(5, 55)) == null);
 
-    assert((try map.put(5, 66)).? == 55);
-    assert((try map.put(5, 55)).? == 66);
+    assert((try map.put(5, 66)).?.value == 55);
+    assert((try map.put(5, 55)).?.value == 66);
+
+    const gop1 = try map.getOrPut(5);
+    assert(gop1.found_existing == true);
+    assert(gop1.kv.value == 55);
+    gop1.kv.value = 77;
+    assert(map.get(5).?.value == 77);
+
+    const gop2 = try map.getOrPut(99);
+    assert(gop2.found_existing == false);
+    gop2.kv.value = 42;
+    assert(map.get(99).?.value == 42);
 
     assert(map.contains(2));
     assert(map.get(2).?.value == 22);
@@ -289,7 +365,7 @@ test "iterator hash map" {
     var direct_allocator = std.heap.DirectAllocator.init();
     defer direct_allocator.deinit();
 
-    var reset_map = HashMap(i32, i32, hash_i32, eql_i32).init(&direct_allocator.allocator);
+    var reset_map = AutoHashMap(i32, i32).init(&direct_allocator.allocator);
     defer reset_map.deinit();
 
     assert((try reset_map.put(1, 11)) == null);
@@ -332,10 +408,124 @@ test "iterator hash map" {
     assert(entry.value == values[0]);
 }
 
-fn hash_i32(x: i32) u32 {
-    return @bitCast(u32, x);
+pub fn getAutoHashFn(comptime K: type) (fn (K) u32) {
+    return struct {
+        fn hash(key: K) u32 {
+            comptime var rng = comptime std.rand.DefaultPrng.init(0);
+            return autoHash(key, &rng.random, u32);
+        }
+    }.hash;
+}
+
+pub fn getAutoEqlFn(comptime K: type) (fn (K, K) bool) {
+    return struct {
+        fn eql(a: K, b: K) bool {
+            return autoEql(a, b);
+        }
+    }.eql;
+}
+
+// TODO improve these hash functions
+pub fn autoHash(key: var, comptime rng: *std.rand.Random, comptime HashInt: type) HashInt {
+    switch (@typeInfo(@typeOf(key))) {
+        builtin.TypeId.NoReturn,
+        builtin.TypeId.Opaque,
+        builtin.TypeId.Undefined,
+        builtin.TypeId.ArgTuple,
+        => @compileError("cannot hash this type"),
+
+        builtin.TypeId.Void,
+        builtin.TypeId.Null,
+        => return 0,
+
+        builtin.TypeId.Int => |info| {
+            const unsigned_x = @bitCast(@IntType(false, info.bits), key);
+            if (info.bits <= HashInt.bit_count) {
+                return HashInt(unsigned_x) *% comptime rng.scalar(HashInt);
+            } else {
+                return @truncate(HashInt, unsigned_x *% comptime rng.scalar(@typeOf(unsigned_x)));
+            }
+        },
+
+        builtin.TypeId.Float => |info| {
+            return autoHash(@bitCast(@IntType(false, info.bits), key), rng, HashInt);
+        },
+        builtin.TypeId.Bool => return autoHash(@boolToInt(key), rng, HashInt),
+        builtin.TypeId.Enum => return autoHash(@enumToInt(key), rng, HashInt),
+        builtin.TypeId.ErrorSet => return autoHash(@errorToInt(key), rng, HashInt),
+        builtin.TypeId.Promise, builtin.TypeId.Fn => return autoHash(@ptrToInt(key), rng, HashInt),
+
+        builtin.TypeId.Namespace,
+        builtin.TypeId.Block,
+        builtin.TypeId.BoundFn,
+        builtin.TypeId.ComptimeFloat,
+        builtin.TypeId.ComptimeInt,
+        builtin.TypeId.Type,
+        => return 0,
+
+        builtin.TypeId.Pointer => |info| switch (info.size) {
+            builtin.TypeInfo.Pointer.Size.One => @compileError("TODO auto hash for single item pointers"),
+            builtin.TypeInfo.Pointer.Size.Many => @compileError("TODO auto hash for many item pointers"),
+            builtin.TypeInfo.Pointer.Size.Slice => {
+                const interval = std.math.max(1, key.len / 256);
+                var i: usize = 0;
+                var h = comptime rng.scalar(HashInt);
+                while (i < key.len) : (i += interval) {
+                    h ^= autoHash(key[i], rng, HashInt);
+                }
+                return h;
+            },
+        },
+
+        builtin.TypeId.Optional => @compileError("TODO auto hash for optionals"),
+        builtin.TypeId.Array => @compileError("TODO auto hash for arrays"),
+        builtin.TypeId.Struct => @compileError("TODO auto hash for structs"),
+        builtin.TypeId.Union => @compileError("TODO auto hash for unions"),
+        builtin.TypeId.ErrorUnion => @compileError("TODO auto hash for error unions"),
+    }
 }
 
-fn eql_i32(a: i32, b: i32) bool {
-    return a == b;
+pub fn autoEql(a: var, b: @typeOf(a)) bool {
+    switch (@typeInfo(@typeOf(a))) {
+        builtin.TypeId.NoReturn,
+        builtin.TypeId.Opaque,
+        builtin.TypeId.Undefined,
+        builtin.TypeId.ArgTuple,
+        => @compileError("cannot test equality of this type"),
+        builtin.TypeId.Void,
+        builtin.TypeId.Null,
+        => return true,
+        builtin.TypeId.Bool,
+        builtin.TypeId.Int,
+        builtin.TypeId.Float,
+        builtin.TypeId.ComptimeFloat,
+        builtin.TypeId.ComptimeInt,
+        builtin.TypeId.Namespace,
+        builtin.TypeId.Block,
+        builtin.TypeId.Promise,
+        builtin.TypeId.Enum,
+        builtin.TypeId.BoundFn,
+        builtin.TypeId.Fn,
+        builtin.TypeId.ErrorSet,
+        builtin.TypeId.Type,
+        => return a == b,
+
+        builtin.TypeId.Pointer => |info| switch (info.size) {
+            builtin.TypeInfo.Pointer.Size.One => @compileError("TODO auto eql for single item pointers"),
+            builtin.TypeInfo.Pointer.Size.Many => @compileError("TODO auto eql for many item pointers"),
+            builtin.TypeInfo.Pointer.Size.Slice => {
+                if (a.len != b.len) return false;
+                for (a) |a_item, i| {
+                    if (!autoEql(a_item, b[i])) return false;
+                }
+                return true;
+            },
+        },
+
+        builtin.TypeId.Optional => @compileError("TODO auto eql for optionals"),
+        builtin.TypeId.Array => @compileError("TODO auto eql for arrays"),
+        builtin.TypeId.Struct => @compileError("TODO auto eql for structs"),
+        builtin.TypeId.Union => @compileError("TODO auto eql for unions"),
+        builtin.TypeId.ErrorUnion => @compileError("TODO auto eql for error unions"),
+    }
 }
std/index.zig
@@ -5,6 +5,7 @@ pub const BufSet = @import("buf_set.zig").BufSet;
 pub const Buffer = @import("buffer.zig").Buffer;
 pub const BufferOutStream = @import("buffer.zig").BufferOutStream;
 pub const HashMap = @import("hash_map.zig").HashMap;
+pub const AutoHashMap = @import("hash_map.zig").AutoHashMap;
 pub const LinkedList = @import("linked_list.zig").LinkedList;
 pub const SegmentedList = @import("segmented_list.zig").SegmentedList;
 pub const DynLib = @import("dynamic_library.zig").DynLib;
std/json.zig
@@ -1318,7 +1318,7 @@ pub const Parser = struct {
                 _ = p.stack.pop();
 
                 var object = &p.stack.items[p.stack.len - 1].Object;
-                _ = try object.put(key, value);
+                _ = try object.put(key, value.*);
                 p.state = State.ObjectKey;
             },
             // Array Parent -> [ ..., <array>, value ]