commit f1768b40b2 (parent 57f9405)
Author: Veikka Tuominen <git@vexu.eu>
Date:   2022-07-28 16:28:37

    stage2: better source location for var decls
src/AstGen.zig
@@ -7424,7 +7424,8 @@ fn builtinCall(
             const token_starts = tree.tokens.items(.start);
             const node_start = token_starts[tree.firstToken(node)];
             astgen.advanceSourceCursor(node_start);
-            const result = try gz.addExtendedPayload(.builtin_src, Zir.Inst.LineColumn{
+            const result = try gz.addExtendedPayload(.builtin_src, Zir.Inst.Src{
+                .node = gz.nodeIndexToRelative(node),
                 .line = astgen.source_line,
                 .column = astgen.source_column,
             });
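
Note: besides line and column, `@src()` now also records its own AST node, relative to the enclosing declaration, so Sema can attach diagnostics to the call itself. What user code observes is unchanged; a minimal sketch, assuming the `std.builtin.SourceLocation` shape of this period:

    const std = @import("std");

    fn here() std.builtin.SourceLocation {
        // Line and column still come from the recorded payload; the new `node`
        // field is only consulted when Sema needs a source location for an error.
        return @src();
    }
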
src/Module.zig
@@ -2161,6 +2161,10 @@ pub const SrcLoc = struct {
                     .local_var_decl => tree.localVarDecl(node),
                     .simple_var_decl => tree.simpleVarDecl(node),
                     .aligned_var_decl => tree.alignedVarDecl(node),
+                    .@"usingnamespace" => {
+                        const node_data = tree.nodes.items(.data);
+                        return nodeToSpan(tree, node_data[node].lhs);
+                    },
                     else => unreachable,
                 };
                 if (full.ast.type_node != 0) {
@@ -2171,6 +2175,58 @@ pub const SrcLoc = struct {
                 const end = start + @intCast(u32, tree.tokenSlice(tok_index).len);
                 return Span{ .start = start, .end = end, .main = start };
             },
+            .node_offset_var_decl_align => |node_off| {
+                const tree = try src_loc.file_scope.getTree(gpa);
+                const node = src_loc.declRelativeToNodeIndex(node_off);
+                const node_tags = tree.nodes.items(.tag);
+                const full: Ast.full.VarDecl = switch (node_tags[node]) {
+                    .global_var_decl => tree.globalVarDecl(node),
+                    .local_var_decl => tree.localVarDecl(node),
+                    .simple_var_decl => tree.simpleVarDecl(node),
+                    .aligned_var_decl => tree.alignedVarDecl(node),
+                    else => unreachable,
+                };
+                return nodeToSpan(tree, full.ast.align_node);
+            },
+            .node_offset_var_decl_section => |node_off| {
+                const tree = try src_loc.file_scope.getTree(gpa);
+                const node = src_loc.declRelativeToNodeIndex(node_off);
+                const node_tags = tree.nodes.items(.tag);
+                const full: Ast.full.VarDecl = switch (node_tags[node]) {
+                    .global_var_decl => tree.globalVarDecl(node),
+                    .local_var_decl => tree.localVarDecl(node),
+                    .simple_var_decl => tree.simpleVarDecl(node),
+                    .aligned_var_decl => tree.alignedVarDecl(node),
+                    else => unreachable,
+                };
+                return nodeToSpan(tree, full.ast.section_node);
+            },
+            .node_offset_var_decl_addrspace => |node_off| {
+                const tree = try src_loc.file_scope.getTree(gpa);
+                const node = src_loc.declRelativeToNodeIndex(node_off);
+                const node_tags = tree.nodes.items(.tag);
+                const full: Ast.full.VarDecl = switch (node_tags[node]) {
+                    .global_var_decl => tree.globalVarDecl(node),
+                    .local_var_decl => tree.localVarDecl(node),
+                    .simple_var_decl => tree.simpleVarDecl(node),
+                    .aligned_var_decl => tree.alignedVarDecl(node),
+                    else => unreachable,
+                };
+                return nodeToSpan(tree, full.ast.addrspace_node);
+            },
+            .node_offset_var_decl_init => |node_off| {
+                const tree = try src_loc.file_scope.getTree(gpa);
+                const node = src_loc.declRelativeToNodeIndex(node_off);
+                const node_tags = tree.nodes.items(.tag);
+                const full: Ast.full.VarDecl = switch (node_tags[node]) {
+                    .global_var_decl => tree.globalVarDecl(node),
+                    .local_var_decl => tree.localVarDecl(node),
+                    .simple_var_decl => tree.simpleVarDecl(node),
+                    .aligned_var_decl => tree.alignedVarDecl(node),
+                    else => unreachable,
+                };
+                return nodeToSpan(tree, full.ast.init_node);
+            },
             .node_offset_builtin_call_arg0 => |n| return src_loc.byteOffsetBuiltinCallArg(gpa, n, 0),
             .node_offset_builtin_call_arg1 => |n| return src_loc.byteOffsetBuiltinCallArg(gpa, n, 1),
             .node_offset_builtin_call_arg2 => |n| return src_loc.byteOffsetBuiltinCallArg(gpa, n, 2),
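
The four new cases added above share one pattern: resolve the decl-relative node offset, view the node as a full var decl, and return the span of one sub-expression. A hypothetical helper, not part of this commit, that captures the shared shape:

    fn varDeclSubSpan(
        src_loc: SrcLoc,
        tree: *const Ast,
        node_off: i32,
        comptime field: []const u8,
    ) Span {
        const node = src_loc.declRelativeToNodeIndex(node_off);
        const full: Ast.full.VarDecl = switch (tree.nodes.items(.tag)[node]) {
            .global_var_decl => tree.globalVarDecl(node),
            .local_var_decl => tree.localVarDecl(node),
            .simple_var_decl => tree.simpleVarDecl(node),
            .aligned_var_decl => tree.alignedVarDecl(node),
            else => unreachable,
        };
        // field is "align_node", "section_node", "addrspace_node" or "init_node".
        return nodeToSpan(tree, @field(full.ast, field));
    }
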
@@ -2857,6 +2913,18 @@ pub const LazySrcLoc = union(enum) {
     /// to the type expression.
     /// The Decl is determined contextually.
     node_offset_var_decl_ty: i32,
+    /// The source location points to the alignment expression of a var decl.
+    /// The Decl is determined contextually.
+    node_offset_var_decl_align: i32,
+    /// The source location points to the linksection expression of a var decl.
+    /// The Decl is determined contextually.
+    node_offset_var_decl_section: i32,
+    /// The source location points to the addrspace expression of a var decl.
+    /// The Decl is determined contextually.
+    node_offset_var_decl_addrspace: i32,
+    /// The source location points to the initializer of a var decl.
+    /// The Decl is determined contextually.
+    node_offset_var_decl_init: i32,
     /// The source location points to a for loop condition expression,
     /// found by taking this AST node index offset from the containing
     /// Decl AST node, which points to a for loop AST node. Next, navigate
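
As with the existing node_offset_* tags, an offset of 0 refers to the AST node of the Decl currently being analyzed, so a call site can point at a sub-expression of "this declaration" without knowing its absolute node index. For example (as used by semaDecl below):

    const align_src: LazySrcLoc = .{ .node_offset_var_decl_align = 0 };
    const init_src: LazySrcLoc = .{ .node_offset_var_decl_init = 0 };
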
@@ -3098,6 +3166,10 @@ pub const LazySrcLoc = union(enum) {
             .node_offset,
             .node_offset_initializer,
             .node_offset_var_decl_ty,
+            .node_offset_var_decl_align,
+            .node_offset_var_decl_section,
+            .node_offset_var_decl_addrspace,
+            .node_offset_var_decl_init,
             .node_offset_for_cond,
             .node_offset_builtin_call_arg0,
             .node_offset_builtin_call_arg1,
@@ -4414,17 +4486,26 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
     const body = zir.extra[extra.end..][0..extra.data.body_len];
     const result_ref = (try sema.analyzeBodyBreak(&block_scope, body)).?.operand;
     try wip_captures.finalize();
-    const src = LazySrcLoc.nodeOffset(0);
-    const decl_tv = try sema.resolveInstValue(&block_scope, .unneeded, result_ref, undefined);
+    const align_src: LazySrcLoc = .{ .node_offset_var_decl_align = 0 };
+    const section_src: LazySrcLoc = .{ .node_offset_var_decl_section = 0 };
+    const address_space_src: LazySrcLoc = .{ .node_offset_var_decl_addrspace = 0 };
+    const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = 0 };
+    const init_src: LazySrcLoc = .{ .node_offset_var_decl_init = 0 };
+    const decl_tv = try sema.resolveInstValue(&block_scope, init_src, result_ref, undefined);
     const decl_align: u32 = blk: {
         const align_ref = decl.zirAlignRef();
         if (align_ref == .none) break :blk 0;
-        break :blk try sema.resolveAlign(&block_scope, src, align_ref);
+        break :blk try sema.resolveAlign(&block_scope, align_src, align_ref);
     };
     const decl_linksection: ?[*:0]const u8 = blk: {
         const linksection_ref = decl.zirLinksectionRef();
         if (linksection_ref == .none) break :blk null;
-        const bytes = try sema.resolveConstString(&block_scope, src, linksection_ref, "linksection must be comptime known");
+        const bytes = try sema.resolveConstString(&block_scope, section_src, linksection_ref, "linksection must be comptime known");
+        if (mem.indexOfScalar(u8, bytes, 0) != null) {
+            return sema.fail(&block_scope, section_src, "linksection cannot contain null bytes", .{});
+        } else if (bytes.len == 0) {
+            return sema.fail(&block_scope, section_src, "linksection cannot be empty", .{});
+        }
         break :blk (try decl_arena_allocator.dupeZ(u8, bytes)).ptr;
     };
     const target = sema.mod.getTarget();
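
Besides the finer-grained source locations, the hunk above adds validation of the linksection string itself: it must be non-empty and must not contain null bytes, and both failures are reported at the linksection expression. A sketch of container-level declarations that should now trip these checks (hypothetical inputs, not test cases from this commit):

    export var a: u32 linksection("") = 0;       // error: linksection cannot be empty
    export var b: u32 linksection("a\x00b") = 0; // error: linksection cannot contain null bytes
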
@@ -4442,27 +4523,27 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
                 .constant => target_util.defaultAddressSpace(target, .global_constant),
                 else => unreachable,
             },
-            else => |addrspace_ref| try sema.analyzeAddrspace(&block_scope, src, addrspace_ref, addrspace_ctx),
+            else => |addrspace_ref| try sema.analyzeAddrspace(&block_scope, address_space_src, addrspace_ref, addrspace_ctx),
         };
     };
 
     // Note this resolves the type of the Decl, not the value; if this Decl
     // is a struct, for example, this resolves `type` (which needs no resolution),
     // not the struct itself.
-    try sema.resolveTypeLayout(&block_scope, src, decl_tv.ty);
+    try sema.resolveTypeLayout(&block_scope, ty_src, decl_tv.ty);
 
     const decl_arena_state = try decl_arena_allocator.create(std.heap.ArenaAllocator.State);
 
     if (decl.is_usingnamespace) {
         if (!decl_tv.ty.eql(Type.type, mod)) {
-            return sema.fail(&block_scope, src, "expected type, found {}", .{
+            return sema.fail(&block_scope, ty_src, "expected type, found {}", .{
                 decl_tv.ty.fmt(mod),
             });
         }
         var buffer: Value.ToTypeBuffer = undefined;
         const ty = try decl_tv.val.toType(&buffer).copy(decl_arena_allocator);
         if (ty.getNamespace() == null) {
-            return sema.fail(&block_scope, src, "type {} has no namespace", .{ty.fmt(mod)});
+            return sema.fail(&block_scope, ty_src, "type {} has no namespace", .{ty.fmt(mod)});
         }
 
         decl.ty = Type.type;
@@ -4508,7 +4589,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
             decl.analysis = .complete;
             decl.generation = mod.generation;
 
-            const has_runtime_bits = try sema.fnHasRuntimeBits(&block_scope, src, decl.ty);
+            const has_runtime_bits = try sema.fnHasRuntimeBits(&block_scope, ty_src, decl.ty);
 
             if (has_runtime_bits) {
                 // We don't fully codegen the decl until later, but we do need to reserve a global
@@ -4525,7 +4606,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
 
             const is_inline = decl.ty.fnCallingConvention() == .Inline;
             if (decl.is_exported) {
-                const export_src = src; // TODO make this point at `export` token
+                const export_src: LazySrcLoc = .{ .token_offset = @boolToInt(decl.is_pub) };
                 if (is_inline) {
                     return sema.fail(&block_scope, export_src, "export of inline function", .{});
                 }
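
The replacement for the TODO locates the export token by its offset from the Decl's first token: `export` is token 0 of the declaration unless `pub` comes first, in which case it is token 1, which is exactly what @boolToInt(decl.is_pub) selects. Illustration (assumed behaviour of the token offset, not code from this commit):

    export fn f() void {}     // first token is `export`: token_offset = 0
    pub export fn g() void {} // first token is `pub`:    token_offset = 1 lands on `export`
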
@@ -4588,14 +4669,14 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
     decl.generation = mod.generation;
 
     const has_runtime_bits = is_extern or
-        (queue_linker_work and try sema.typeHasRuntimeBits(&block_scope, src, decl.ty));
+        (queue_linker_work and try sema.typeHasRuntimeBits(&block_scope, ty_src, decl.ty));
 
     if (has_runtime_bits) {
         log.debug("queue linker work for {*} ({s})", .{ decl, decl.name });
 
         // Needed for codegen_decl which will call updateDecl and then the
         // codegen backend wants full access to the Decl Type.
-        try sema.resolveTypeFully(&block_scope, src, decl.ty);
+        try sema.resolveTypeFully(&block_scope, ty_src, decl.ty);
 
         try mod.comp.bin_file.allocateDeclIndexes(decl_index);
         try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl_index });
@@ -4606,7 +4687,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !bool {
     }
 
     if (decl.is_exported) {
-        const export_src = src; // TODO point to the export token
+        const export_src: LazySrcLoc = .{ .token_offset = @boolToInt(decl.is_pub) };
         // The scope needs to have the decl in it.
         const options: std.builtin.ExportOptions = .{ .name = mem.sliceTo(decl.name, 0) };
         try sema.analyzeExport(&block_scope, export_src, options, decl_index);
src/Sema.zig
@@ -2971,7 +2971,7 @@ fn zirAllocExtended(
     const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand);
     const src = LazySrcLoc.nodeOffset(extra.data.src_node);
     const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = extra.data.src_node };
-    const align_src = src; // TODO better source location
+    const align_src: LazySrcLoc = .{ .node_offset_var_decl_align = extra.data.src_node };
     const small = @bitCast(Zir.Inst.AllocExtended.Small, extended.small);
 
     var extra_index: usize = extra.end;
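
zirAllocExtended handles local variables that carry an explicit alignment, so alignment diagnostics for those should now land on the align expression rather than on the whole declaration. A hedged sketch of input that exercises this path:

    export fn entry() void {
        var x: u8 align(3) = 0; // the "not a power of two" error is now reported at `3`
        _ = x;
    }
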
@@ -8046,7 +8046,7 @@ fn zirPtrToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     if (try sema.resolveMaybeUndefValIntable(block, ptr_src, ptr)) |ptr_val| {
         return sema.addConstant(Type.usize, ptr_val);
     }
-    try sema.requireRuntimeBlock(block, ptr_src, ptr_src);
+    try sema.requireRuntimeBlock(block, inst_data.src(), ptr_src);
     return block.addUnOp(.ptrtoint, ptr);
 }
 
@@ -13174,8 +13174,8 @@ fn zirBuiltinSrc(
     const tracy = trace(@src());
     defer tracy.end();
 
-    const src = sema.src; // TODO better source location
-    const extra = sema.code.extraData(Zir.Inst.LineColumn, extended.operand).data;
+    const extra = sema.code.extraData(Zir.Inst.Src, extended.operand).data;
+    const src = LazySrcLoc.nodeOffset(extra.node);
     const func = sema.func orelse return sema.fail(block, src, "@src outside function", .{});
     const fn_owner_decl = sema.mod.declPtr(func.owner_decl);
 
@@ -18959,10 +18959,8 @@ fn zirVarExtended(
     extended: Zir.Inst.Extended.InstData,
 ) CompileError!Air.Inst.Ref {
     const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand);
-    const src = sema.src;
-    const ty_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at type
-    const name_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at the name token
-    const init_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at init expr
+    const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = 0 };
+    const init_src: LazySrcLoc = .{ .node_offset_var_decl_init = 0 };
     const small = @bitCast(Zir.Inst.ExtendedVar.Small, extended.small);
 
     var extra_index: usize = extra.end;
@@ -18976,12 +18974,6 @@ fn zirVarExtended(
     // ZIR supports encoding this information but it is not used; the information
     // is encoded via the Decl entry.
     assert(!small.has_align);
-    //const align_val: Value = if (small.has_align) blk: {
-    //    const align_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
-    //    extra_index += 1;
-    //    const align_tv = try sema.resolveInstConst(block, align_src, align_ref);
-    //    break :blk align_tv.val;
-    //} else Value.@"null";
 
     const uncasted_init: Air.Inst.Ref = if (small.has_init) blk: {
         const init_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]);
@@ -19005,7 +18997,7 @@ fn zirVarExtended(
             return sema.failWithNeededComptime(block, init_src, "container level variable initializers must be comptime known");
     } else Value.initTag(.unreachable_value);
 
-    try sema.validateVarType(block, name_src, var_ty, small.is_extern);
+    try sema.validateVarType(block, ty_src, var_ty, small.is_extern);
 
     const new_var = try sema.gpa.create(Module.Var);
     errdefer sema.gpa.destroy(new_var);
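
Using ty_src instead of the whole-decl location is what shifts the expected columns in the test updates below: when a container-level `var` has no explicit type annotation, node_offset_var_decl_ty falls back to the name token, so "variable of type ... must be const or comptime" moves from column 1 to column 5. A sketch of that shape of input (not the exact test sources):

    var foo = u8;           // error now points at `foo` (:1:5) rather than `var` (:1:1)
    export fn entry() foo {
        return 0;
    }
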
src/Zir.zig
@@ -3548,6 +3548,12 @@ pub const Inst = struct {
         ty: Ref,
         init_count: u32,
     };
+
+    pub const Src = struct {
+        node: i32,
+        line: u32,
+        column: u32,
+    };
 };
 
 pub const SpecialProng = enum { none, @"else", under };
test/cases/compile_errors/stage1/obj/src_outside_function.zig
@@ -1,9 +0,0 @@
-comptime {
-    @src();
-}
-
-// error
-// backend=stage1
-// target=native
-//
-// tmp.zig:2:5: error: @src outside function
test/cases/compile_errors/stage1/obj/global_variable_alignment_non_power_of_2.zig → test/cases/compile_errors/global_variable_alignment_non_power_of_2.zig
@@ -2,7 +2,7 @@ const some_data: [100]u8 align(3) = undefined;
 export fn entry() usize { return @sizeOf(@TypeOf(some_data)); }
 
 // error
-// backend=stage1
+// backend=stage2
 // target=native
 //
-// tmp.zig:1:32: error: alignment value 3 is not a power of 2
+// :1:32: error: alignment value '3' is not a power of two
test/cases/compile_errors/src_outside_function.zig
@@ -0,0 +1,9 @@
+comptime {
+    @src();
+}
+
+// error
+// backend=stage2
+// target=native
+//
+// :2:5: error: @src outside function
test/cases/compile_errors/type_variables_must_be_constant.zig
@@ -7,5 +7,5 @@ export fn entry() foo {
 // backend=stage2
 // target=native
 //
-// :1:1: error: variable of type 'type' must be const or comptime
-// :1:1: note: types are not available at runtime
+// :1:5: error: variable of type 'type' must be const or comptime
+// :1:5: note: types are not available at runtime
test/cases/compile_errors/use_invalid_number_literal_as_array_index.zig
@@ -8,5 +8,5 @@ export fn entry() void {
 // backend=stage2
 // target=native
 //
-// :1:1: error: variable of type 'comptime_int' must be const or comptime
-// :1:1: note: to modify this variable at runtime, it must be given an explicit fixed-size number type
+// :1:5: error: variable of type 'comptime_int' must be const or comptime
+// :1:5: note: to modify this variable at runtime, it must be given an explicit fixed-size number type
test/cases/compile_errors/usingnamespace_with_wrong_type.zig
@@ -4,4 +4,4 @@ usingnamespace void;
 // backend=stage2
 // target=native
 //
-// :1:1: error: type void has no namespace
+// :1:16: error: type void has no namespace