const std = @import("std");
const mem = std.mem;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const fs = std.fs;
const Path = std.Build.Cache.Path;

const C = @This();
const build_options = @import("build_options");
const Zcu = @import("../Zcu.zig");
const Module = @import("../Package/Module.zig");
const InternPool = @import("../InternPool.zig");
const Alignment = InternPool.Alignment;
const Compilation = @import("../Compilation.zig");
const codegen = @import("../codegen/c.zig");
const link = @import("../link.zig");
const trace = @import("../tracy.zig").trace;
const Type = @import("../Type.zig");
const Value = @import("../Value.zig");
const AnyMir = @import("../codegen.zig").AnyMir;

pub const zig_h = "#include \"zig.h\"\n";

base: link.File,
/// This linker backend does not try to incrementally link output C source code.
/// Instead, it tracks all declarations in this table, and iterates over it
/// in the flush function, stitching pre-rendered pieces of C code together.
navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, AvBlock),
/// All the string bytes of rendered C code, all squished into one array.
/// While in progress, a separate buffer is used, and then when finished, the
/// buffer is copied into this one.
string_bytes: std.ArrayList(u8),
/// Tracks all the anonymous decls that are used by all the decls so they can
/// be rendered during flush().
uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, AvBlock),
/// Sparse set of uavs that are overaligned. Underaligned anon decls are
/// lowered the same as ABI-aligned anon decls. The keys here are a subset of
/// the keys of `uavs`.
aligned_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment),

exported_navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, ExportedBlock),
exported_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, ExportedBlock),

/// Optimization: `updateDecl` reuses this buffer rather than creating a new
/// one with every call.
fwd_decl_buf: []u8,
/// Optimization: `updateDecl` reuses this buffer rather than creating a new
/// one with every call.
code_header_buf: []u8,
/// Optimization: `updateDecl` reuses this buffer rather than creating a new
/// one with every call.
code_buf: []u8,
/// Optimization: `flush` reuses this buffer rather than creating a new
/// one with every call.
scratch_buf: []u32,

/// A reference into `string_bytes`.
const String = extern struct {
    start: u32,
    len: u32,

    const empty: String = .{
        .start = 0,
        .len = 0,
    };

    fn concat(lhs: String, rhs: String) String {
        assert(lhs.start + lhs.len == rhs.start);
        return .{
            .start = lhs.start,
            .len = lhs.len + rhs.len,
        };
    }
};
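
// A hypothetical usage sketch (comment only, not part of the build): `concat`
// asserts that `rhs` begins exactly where `lhs` ends, which holds whenever the
// two strings come from consecutive `addString` calls on some `c_file: *C`,
// since each call appends to the end of `string_bytes`:
//
//     const lhs = try c_file.addString("int x;"); // .{ .start = 0, .len = 6 }
//     const rhs = try c_file.addString("\n");     // .{ .start = 6, .len = 1 }
//     const all = lhs.concat(rhs);                // .{ .start = 0, .len = 7 }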

/// Per-declaration data.
pub const AvBlock = struct {
    fwd_decl: String = .empty,
    code: String = .empty,
    /// Each `Decl` stores a set of used `CType`s. In `flush()`, we iterate
    /// over each `Decl` and generate the definition for each used `CType` once.
    ctype_pool: codegen.CType.Pool = .empty,
    /// May contain string references to ctype_pool
    lazy_fns: codegen.LazyFnMap = .{},

    fn deinit(ab: *AvBlock, gpa: Allocator) void {
        ab.lazy_fns.deinit(gpa);
        ab.ctype_pool.deinit(gpa);
        ab.* = undefined;
    }
};

/// Per-exported-symbol data.
pub const ExportedBlock = struct {
    fwd_decl: String = .empty,
};

pub fn getString(this: C, s: String) []const u8 {
    return this.string_bytes.items[s.start..][0..s.len];
}

pub fn addString(this: *C, s: []const u8) Allocator.Error!String {
    const comp = this.base.comp;
    const gpa = comp.gpa;
    try this.string_bytes.appendSlice(gpa, s);
    return .{
        .start = @intCast(this.string_bytes.items.len - s.len),
        .len = @intCast(s.len),
    };
}

pub fn open(
    arena: Allocator,
    comp: *Compilation,
    emit: Path,
    options: link.File.OpenOptions,
) !*C {
    return createEmpty(arena, comp, emit, options);
}

pub fn createEmpty(
    arena: Allocator,
    comp: *Compilation,
    emit: Path,
    options: link.File.OpenOptions,
) !*C {
    const target = &comp.root_mod.resolved_target.result;
    assert(target.ofmt == .c);
    const optimize_mode = comp.root_mod.optimize_mode;
    const use_lld = build_options.have_llvm and comp.config.use_lld;
    const use_llvm = comp.config.use_llvm;
    const output_mode = comp.config.output_mode;

    // These are caught by `Compilation.Config.resolve`.
    assert(!use_lld);
    assert(!use_llvm);

    const file = try emit.root_dir.handle.createFile(emit.sub_path, .{
        // Truncation is done on `flush`.
        .truncate = false,
    });
    errdefer file.close();

    const c_file = try arena.create(C);

    c_file.* = .{
        .base = .{
            .tag = .c,
            .comp = comp,
            .emit = emit,
            .gc_sections = options.gc_sections orelse (optimize_mode != .Debug and output_mode != .Obj),
            .print_gc_sections = options.print_gc_sections,
            .stack_size = options.stack_size orelse 16777216,
            .allow_shlib_undefined = options.allow_shlib_undefined orelse false,
            .file = file,
            .build_id = options.build_id,
        },
        .navs = .empty,
        .string_bytes = .empty,
        .uavs = .empty,
        .aligned_uavs = .empty,
        .exported_navs = .empty,
        .exported_uavs = .empty,
        .fwd_decl_buf = &.{},
        .code_header_buf = &.{},
        .code_buf = &.{},
        .scratch_buf = &.{},
    };

    return c_file;
}

pub fn deinit(self: *C) void {
    const gpa = self.base.comp.gpa;

    for (self.navs.values()) |*db| {
        db.deinit(gpa);
    }
    self.navs.deinit(gpa);

    for (self.uavs.values()) |*db| {
        db.deinit(gpa);
    }
    self.uavs.deinit(gpa);
    self.aligned_uavs.deinit(gpa);

    self.exported_navs.deinit(gpa);
    self.exported_uavs.deinit(gpa);

    self.string_bytes.deinit(gpa);
    gpa.free(self.fwd_decl_buf);
    gpa.free(self.code_header_buf);
    gpa.free(self.code_buf);
    gpa.free(self.scratch_buf);
}

pub fn updateFunc(
    self: *C,
    pt: Zcu.PerThread,
    func_index: InternPool.Index,
    mir: *AnyMir,
) link.File.UpdateNavError!void {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;
    const func = zcu.funcInfo(func_index);

    const gop = try self.navs.getOrPut(gpa, func.owner_nav);
    if (gop.found_existing) gop.value_ptr.deinit(gpa);
    gop.value_ptr.* = .{
        .code = .empty,
        .fwd_decl = .empty,
        .ctype_pool = mir.c.ctype_pool.move(),
        .lazy_fns = mir.c.lazy_fns.move(),
    };
    gop.value_ptr.fwd_decl = try self.addString(mir.c.fwd_decl);
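    // `code_header` and `code` are appended back-to-back, so they occupy
    // adjacent ranges of `string_bytes` and `concat` can merge them into one
    // contiguous `String`.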
    const code_header = try self.addString(mir.c.code_header);
    const code = try self.addString(mir.c.code);
    gop.value_ptr.code = code_header.concat(code);
    try self.addUavsFromCodegen(&mir.c.uavs);
}

fn updateUav(self: *C, pt: Zcu.PerThread, i: usize) link.File.FlushError!void {
    const gpa = self.base.comp.gpa;
    const uav = self.uavs.keys()[i];

    var object: codegen.Object = .{
        .dg = .{
            .gpa = gpa,
            .pt = pt,
            .mod = pt.zcu.root_mod,
            .error_msg = null,
            .pass = .{ .uav = uav },
            .is_naked_fn = false,
            .expected_block = null,
            .fwd_decl = undefined,
            .ctype_pool = .empty,
            .scratch = .initBuffer(self.scratch_buf),
            .uavs = .empty,
        },
        .code_header = undefined,
        .code = undefined,
        .indent_counter = 0,
    };
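    // The cached byte buffers are loaned out to the writers here and reclaimed
    // in the `defer` below, so their capacity is reused across calls rather
    // than reallocated each time.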
    object.dg.fwd_decl = .initOwnedSlice(gpa, self.fwd_decl_buf);
    object.code = .initOwnedSlice(gpa, self.code_buf);
    defer {
        object.dg.uavs.deinit(gpa);
        object.dg.ctype_pool.deinit(object.dg.gpa);

        self.fwd_decl_buf = object.dg.fwd_decl.toArrayList().allocatedSlice();
        self.code_buf = object.code.toArrayList().allocatedSlice();
        self.scratch_buf = object.dg.scratch.allocatedSlice();
    }
    try object.dg.ctype_pool.init(gpa);

    const c_value: codegen.CValue = .{ .constant = Value.fromInterned(uav) };
    const alignment: Alignment = self.aligned_uavs.get(uav) orelse .none;
    codegen.genDeclValue(&object, c_value.constant, c_value, alignment, .none) catch |err| switch (err) {
        error.AnalysisFail => {
            @panic("TODO: C backend AnalysisFail on anonymous decl");
            //try zcu.failed_decls.put(gpa, decl_index, object.dg.error_msg.?);
            //return;
        },
        error.WriteFailed, error.OutOfMemory => return error.OutOfMemory,
    };

    try self.addUavsFromCodegen(&object.dg.uavs);

    object.dg.ctype_pool.freeUnusedCapacity(gpa);
    self.uavs.values()[i] = .{
        .fwd_decl = try self.addString(object.dg.fwd_decl.written()),
        .code = try self.addString(object.code.written()),
        .ctype_pool = object.dg.ctype_pool.move(),
    };
}

pub fn updateNav(self: *C, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) link.File.UpdateNavError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = self.base.comp.gpa;
    const zcu = pt.zcu;
    const ip = &zcu.intern_pool;

    const nav = ip.getNav(nav_index);
    const nav_init = switch (ip.indexToKey(nav.status.fully_resolved.val)) {
        .func => return,
        .@"extern" => .none,
        .variable => |variable| variable.init,
        else => nav.status.fully_resolved.val,
    };
    if (nav_init != .none and !Value.fromInterned(nav_init).typeOf(zcu).hasRuntimeBits(zcu)) return;

    const gop = try self.navs.getOrPut(gpa, nav_index);
    errdefer _ = self.navs.pop();
    if (!gop.found_existing) gop.value_ptr.* = .{};
    const ctype_pool = &gop.value_ptr.ctype_pool;
    try ctype_pool.init(gpa);
    ctype_pool.clearRetainingCapacity();

    var object: codegen.Object = .{
        .dg = .{
            .gpa = gpa,
            .pt = pt,
            .mod = zcu.navFileScope(nav_index).mod.?,
            .error_msg = null,
            .pass = .{ .nav = nav_index },
            .is_naked_fn = false,
            .expected_block = null,
            .fwd_decl = undefined,
            .ctype_pool = ctype_pool.*,
            .scratch = .initBuffer(self.scratch_buf),
            .uavs = .empty,
        },
        .code_header = undefined,
        .code = undefined,
        .indent_counter = 0,
    };
    object.dg.fwd_decl = .initOwnedSlice(gpa, self.fwd_decl_buf);
    object.code = .initOwnedSlice(gpa, self.code_buf);
    defer {
        object.dg.uavs.deinit(gpa);
        ctype_pool.* = object.dg.ctype_pool.move();
        ctype_pool.freeUnusedCapacity(gpa);

        self.fwd_decl_buf = object.dg.fwd_decl.toArrayList().allocatedSlice();
        self.code_buf = object.code.toArrayList().allocatedSlice();
        self.scratch_buf = object.dg.scratch.allocatedSlice();
    }

    codegen.genDecl(&object) catch |err| switch (err) {
        error.AnalysisFail => switch (zcu.codegenFailMsg(nav_index, object.dg.error_msg.?)) {
            error.CodegenFail => return,
            error.OutOfMemory => |e| return e,
        },
        error.WriteFailed, error.OutOfMemory => return error.OutOfMemory,
    };
    gop.value_ptr.fwd_decl = try self.addString(object.dg.fwd_decl.written());
    gop.value_ptr.code = try self.addString(object.code.written());
    try self.addUavsFromCodegen(&object.dg.uavs);
}

pub fn updateLineNumber(self: *C, pt: Zcu.PerThread, ti_id: InternPool.TrackedInst.Index) !void {
    // The C backend does not have the ability to fix line numbers without
    // re-generating the entire Decl.
    _ = self;
    _ = pt;
    _ = ti_id;
}

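/// Writes the target-specific preprocessor defines that must precede `zig.h`.
/// As an illustration (the values shown are assumptions about one particular
/// target, not normative), an x86_64-windows-msvc build would emit roughly:
///
///     #define ZIG_TARGET_ABI_MSVC
///     #define ZIG_TARGET_MAX_INT_ALIGNMENT 16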
fn abiDefines(w: *std.Io.Writer, target: *const std.Target) !void {
    switch (target.abi) {
        .msvc, .itanium => try w.writeAll("#define ZIG_TARGET_ABI_MSVC\n"),
        else => {},
    }
    try w.print("#define ZIG_TARGET_MAX_INT_ALIGNMENT {d}\n", .{
        target.cMaxIntAlignment(),
    });
}

pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
    _ = arena; // Has the same lifetime as the call to Compilation.update.

    const tracy = trace(@src());
    defer tracy.end();

    const sub_prog_node = prog_node.start("Flush Module", 0);
    defer sub_prog_node.end();

    const comp = self.base.comp;
    const diags = &comp.link_diags;
    const gpa = comp.gpa;
    const zcu = self.base.comp.zcu.?;
    const ip = &zcu.intern_pool;
    const pt: Zcu.PerThread = .activate(zcu, tid);
    defer pt.deactivate();

    {
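        // Lowering one uav may discover more (`updateUav` appends to
        // `self.uavs` via `addUavsFromCodegen`), so `count()` is re-read on
        // every iteration instead of being cached.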
        var i: usize = 0;
        while (i < self.uavs.count()) : (i += 1) {
            try self.updateUav(pt, i);
        }
    }

    // This code path happens exclusively with -ofmt=c. The flush logic for
    // emit-h is in `flushEmitH` below.

    var f: Flush = .{
        .ctype_pool = .empty,
        .ctype_global_from_decl_map = .empty,
        .ctypes = .empty,

        .lazy_ctype_pool = .empty,
        .lazy_fns = .empty,
        .lazy_fwd_decl = .empty,
        .lazy_code = .empty,

        .all_buffers = .empty,
        .file_size = 0,
    };
    defer f.deinit(gpa);

    var abi_defines_aw: std.Io.Writer.Allocating = .init(gpa);
    defer abi_defines_aw.deinit();
    abiDefines(&abi_defines_aw.writer, zcu.getTarget()) catch |err| switch (err) {
        error.WriteFailed => return error.OutOfMemory,
    };

    // Covers defines, zig.h, ctypes, asm, lazy fwd.
    try f.all_buffers.ensureUnusedCapacity(gpa, 5);

    f.appendBufAssumeCapacity(abi_defines_aw.written());
    f.appendBufAssumeCapacity(zig_h);

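    // Reserve placeholder slots now; they are filled in near the end of
    // `flush`, once the ctypes and lazy forward decls have been rendered.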
    const ctypes_index = f.all_buffers.items.len;
    f.all_buffers.items.len += 1;

    var asm_aw: std.Io.Writer.Allocating = .init(gpa);
    defer asm_aw.deinit();
    codegen.genGlobalAsm(zcu, &asm_aw.writer) catch |err| switch (err) {
        error.WriteFailed => return error.OutOfMemory,
    };
    f.appendBufAssumeCapacity(asm_aw.written());

    const lazy_index = f.all_buffers.items.len;
    f.all_buffers.items.len += 1;

    try f.lazy_ctype_pool.init(gpa);
    try self.flushErrDecls(pt, &f);

    // Unlike other backends, the .c code we are emitting has order-dependent
    // decls: `CType`s, forward decls, and non-functions are emitted first.

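    // Collect every exported symbol name first so that `flushAvBlock` can
    // skip extern fwd decls whose name collides with an export.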
    {
        var export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .empty;
        defer export_names.deinit(gpa);
        try export_names.ensureTotalCapacity(gpa, @intCast(zcu.single_exports.count()));
        for (zcu.single_exports.values()) |export_index| {
            export_names.putAssumeCapacity(export_index.ptr(zcu).opts.name, {});
        }
        for (zcu.multi_exports.values()) |info| {
            try export_names.ensureUnusedCapacity(gpa, info.len);
            for (zcu.all_exports.items[info.index..][0..info.len]) |@"export"| {
                export_names.putAssumeCapacity(@"export".opts.name, {});
            }
        }

        for (self.uavs.keys(), self.uavs.values()) |uav, *av_block| try self.flushAvBlock(
            pt,
            zcu.root_mod,
            &f,
            av_block,
            self.exported_uavs.getPtr(uav),
            export_names,
            .none,
        );

        for (self.navs.keys(), self.navs.values()) |nav, *av_block| try self.flushAvBlock(
            pt,
            zcu.navFileScope(nav).mod.?,
            &f,
            av_block,
            self.exported_navs.getPtr(nav),
            export_names,
            if (ip.getNav(nav).getExtern(ip) != null)
                ip.getNav(nav).name.toOptional()
            else
                .none,
        );
    }

    {
        // We need to flush lazy ctypes after flushing all decls but before
        // flushing any decl ctypes. This ensures that every lazy CType.Index
        // exactly matches the global CType.Index.
        try f.ctype_pool.init(gpa);
        try self.flushCTypes(zcu, &f, .flush, &f.lazy_ctype_pool);

        for (self.uavs.keys(), self.uavs.values()) |uav, av_block| {
            try self.flushCTypes(zcu, &f, .{ .uav = uav }, &av_block.ctype_pool);
        }

        for (self.navs.keys(), self.navs.values()) |nav, av_block| {
            try self.flushCTypes(zcu, &f, .{ .nav = nav }, &av_block.ctype_pool);
        }
    }

    f.all_buffers.items[ctypes_index] = f.ctypes.items;
    f.file_size += f.ctypes.items.len;

    f.all_buffers.items[lazy_index] = f.lazy_fwd_decl.items;
    f.file_size += f.lazy_fwd_decl.items.len;

    // Now the code.
    try f.all_buffers.ensureUnusedCapacity(gpa, 1 + (self.uavs.count() + self.navs.count()) * 2);
    f.appendBufAssumeCapacity(f.lazy_code.items);
    for (self.uavs.keys(), self.uavs.values()) |uav, av_block| f.appendCodeAssumeCapacity(
        if (self.exported_uavs.contains(uav)) .default else switch (ip.indexToKey(uav)) {
            .@"extern" => .zig_extern,
            else => .static,
        },
        self.getString(av_block.code),
    );
    for (self.navs.keys(), self.navs.values()) |nav, av_block| f.appendCodeAssumeCapacity(storage: {
        if (self.exported_navs.contains(nav)) break :storage .default;
        if (ip.getNav(nav).getExtern(ip) != null) break :storage .zig_extern;
        break :storage .static;
    }, self.getString(av_block.code));

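    // The file was opened with `.truncate = false`, so `setEndPos` both drops
    // any stale bytes from a previous, larger flush and tells the file system
    // the final size up front.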
    const file = self.base.file.?;
    file.setEndPos(f.file_size) catch |err| return diags.fail("failed to allocate file: {s}", .{@errorName(err)});
    var fw = file.writer(&.{});
    var w = &fw.interface;
    w.writeVecAll(f.all_buffers.items) catch |err| switch (err) {
        error.WriteFailed => return diags.fail("failed to write to '{f}': {s}", .{
            std.fmt.alt(self.base.emit, .formatEscapeChar), @errorName(fw.err.?),
        }),
    };
}

const Flush = struct {
    ctype_pool: codegen.CType.Pool,
    ctype_global_from_decl_map: std.ArrayList(codegen.CType),
    ctypes: std.ArrayList(u8),

    lazy_ctype_pool: codegen.CType.Pool,
    lazy_fns: LazyFns,
    lazy_fwd_decl: std.ArrayList(u8),
    lazy_code: std.ArrayList(u8),

    /// We collect a list of buffers to write, and write them all at once with pwritev 😎
    all_buffers: std.ArrayList([]const u8),
    /// Keeps track of the total bytes of `all_buffers`.
    file_size: u64,

    const LazyFns = std.AutoHashMapUnmanaged(codegen.LazyFnKey, void);

    fn appendBufAssumeCapacity(f: *Flush, buf: []const u8) void {
        if (buf.len == 0) return;
        f.all_buffers.appendAssumeCapacity(buf);
        f.file_size += buf.len;
    }

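    // Non-exported decls get `static` for internal linkage; `zig_extern` is
    // the extern-qualifier macro provided by zig.h (an assumption inferred
    // from its use here) for decls that are extern but not exported.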
    fn appendCodeAssumeCapacity(f: *Flush, storage: enum { default, zig_extern, static }, code: []const u8) void {
        if (code.len == 0) return;
        f.appendBufAssumeCapacity(switch (storage) {
            .default => "\n",
            .zig_extern => "\nzig_extern ",
            .static => "\nstatic ",
        });
        f.appendBufAssumeCapacity(code);
    }

    fn deinit(f: *Flush, gpa: Allocator) void {
        f.ctype_pool.deinit(gpa);
        assert(f.ctype_global_from_decl_map.items.len == 0);
        f.ctype_global_from_decl_map.deinit(gpa);
        f.ctypes.deinit(gpa);
        f.lazy_ctype_pool.deinit(gpa);
        f.lazy_fns.deinit(gpa);
        f.lazy_fwd_decl.deinit(gpa);
        f.lazy_code.deinit(gpa);
        f.all_buffers.deinit(gpa);
    }
};

const FlushDeclError = error{
    OutOfMemory,
};

fn flushCTypes(
    self: *C,
    zcu: *Zcu,
    f: *Flush,
    pass: codegen.DeclGen.Pass,
    decl_ctype_pool: *const codegen.CType.Pool,
) FlushDeclError!void {
    const gpa = self.base.comp.gpa;
    const global_ctype_pool = &f.ctype_pool;

    const global_from_decl_map = &f.ctype_global_from_decl_map;
    assert(global_from_decl_map.items.len == 0);
    try global_from_decl_map.ensureTotalCapacity(gpa, decl_ctype_pool.items.len);
    defer global_from_decl_map.clearRetainingCapacity();
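    // `global_from_decl_map.items[i]` holds the global `CType` for decl-local
    // pool index `i`. Entries are appended in index order, so by the time a
    // composite ctype is translated, the mappings for its components already
    // exist.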

    var ctypes_aw: std.Io.Writer.Allocating = .fromArrayList(gpa, &f.ctypes);
    const ctypes_bw = &ctypes_aw.writer;
    defer f.ctypes = ctypes_aw.toArrayList();

    for (0..decl_ctype_pool.items.len) |decl_ctype_pool_index| {
        const PoolAdapter = struct {
            global_from_decl_map: []const codegen.CType,
            pub fn eql(pool_adapter: @This(), decl_ctype: codegen.CType, global_ctype: codegen.CType) bool {
                return if (decl_ctype.toPoolIndex()) |decl_pool_index|
                    decl_pool_index < pool_adapter.global_from_decl_map.len and
                        pool_adapter.global_from_decl_map[decl_pool_index].eql(global_ctype)
                else
                    decl_ctype.index == global_ctype.index;
            }
            pub fn copy(pool_adapter: @This(), decl_ctype: codegen.CType) codegen.CType {
                return if (decl_ctype.toPoolIndex()) |decl_pool_index|
                    pool_adapter.global_from_decl_map[decl_pool_index]
                else
                    decl_ctype;
            }
        };
        const decl_ctype = codegen.CType.fromPoolIndex(decl_ctype_pool_index);
        const global_ctype, const found_existing = try global_ctype_pool.getOrPutAdapted(
            gpa,
            decl_ctype_pool,
            decl_ctype,
            PoolAdapter{ .global_from_decl_map = global_from_decl_map.items },
        );
        global_from_decl_map.appendAssumeCapacity(global_ctype);
        codegen.genTypeDecl(
            zcu,
            ctypes_bw,
            global_ctype_pool,
            global_ctype,
            pass,
            decl_ctype_pool,
            decl_ctype,
            found_existing,
        ) catch |err| switch (err) {
            error.WriteFailed => return error.OutOfMemory,
        };
    }
}

fn flushErrDecls(self: *C, pt: Zcu.PerThread, f: *Flush) FlushDeclError!void {
    const gpa = self.base.comp.gpa;

    var object: codegen.Object = .{
        .dg = .{
            .gpa = gpa,
            .pt = pt,
            .mod = pt.zcu.root_mod,
            .error_msg = null,
            .pass = .flush,
            .is_naked_fn = false,
            .expected_block = null,
            .fwd_decl = undefined,
            .ctype_pool = f.lazy_ctype_pool,
            .scratch = .initBuffer(self.scratch_buf),
            .uavs = .empty,
        },
        .code_header = undefined,
        .code = undefined,
        .indent_counter = 0,
    };
    object.dg.fwd_decl = .fromArrayList(gpa, &f.lazy_fwd_decl);
    object.code = .fromArrayList(gpa, &f.lazy_code);
    defer {
        object.dg.uavs.deinit(gpa);
        f.lazy_ctype_pool = object.dg.ctype_pool.move();
        f.lazy_ctype_pool.freeUnusedCapacity(gpa);

        f.lazy_fwd_decl = object.dg.fwd_decl.toArrayList();
        f.lazy_code = object.code.toArrayList();
        self.scratch_buf = object.dg.scratch.allocatedSlice();
    }

    codegen.genErrDecls(&object) catch |err| switch (err) {
        error.AnalysisFail => unreachable,
        error.WriteFailed, error.OutOfMemory => return error.OutOfMemory,
    };

    try self.addUavsFromCodegen(&object.dg.uavs);
}

fn flushLazyFn(
    self: *C,
    pt: Zcu.PerThread,
    mod: *Module,
    f: *Flush,
    lazy_ctype_pool: *const codegen.CType.Pool,
    lazy_fn: codegen.LazyFnMap.Entry,
) FlushDeclError!void {
    const gpa = self.base.comp.gpa;

    var object: codegen.Object = .{
        .dg = .{
            .gpa = gpa,
            .pt = pt,
            .mod = mod,
            .error_msg = null,
            .pass = .flush,
            .is_naked_fn = false,
            .expected_block = null,
            .fwd_decl = undefined,
            .ctype_pool = f.lazy_ctype_pool,
            .scratch = .initBuffer(self.scratch_buf),
            .uavs = .empty,
        },
        .code_header = undefined,
        .code = undefined,
        .indent_counter = 0,
    };
    object.dg.fwd_decl = .fromArrayList(gpa, &f.lazy_fwd_decl);
    object.code = .fromArrayList(gpa, &f.lazy_code);
    defer {
        // If this assert trips, just handle the anon_decl_deps the same way
        // `updateFunc()` does.
        assert(object.dg.uavs.count() == 0);
        f.lazy_ctype_pool = object.dg.ctype_pool.move();
        f.lazy_ctype_pool.freeUnusedCapacity(gpa);

        f.lazy_fwd_decl = object.dg.fwd_decl.toArrayList();
        f.lazy_code = object.code.toArrayList();
        self.scratch_buf = object.dg.scratch.allocatedSlice();
    }

    codegen.genLazyFn(&object, lazy_ctype_pool, lazy_fn) catch |err| switch (err) {
        error.AnalysisFail => unreachable,
        error.WriteFailed, error.OutOfMemory => return error.OutOfMemory,
    };
}

fn flushLazyFns(
    self: *C,
    pt: Zcu.PerThread,
    mod: *Module,
    f: *Flush,
    lazy_ctype_pool: *const codegen.CType.Pool,
    lazy_fns: codegen.LazyFnMap,
) FlushDeclError!void {
    const gpa = self.base.comp.gpa;
    try f.lazy_fns.ensureUnusedCapacity(gpa, @intCast(lazy_fns.count()));

    var it = lazy_fns.iterator();
    while (it.next()) |entry| {
        const gop = f.lazy_fns.getOrPutAssumeCapacity(entry.key_ptr.*);
        if (gop.found_existing) continue;
        gop.value_ptr.* = {};
        try self.flushLazyFn(pt, mod, f, lazy_ctype_pool, entry);
    }
}

fn flushAvBlock(
    self: *C,
    pt: Zcu.PerThread,
    mod: *Module,
    f: *Flush,
    av_block: *const AvBlock,
    exported_block: ?*const ExportedBlock,
    export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void),
    extern_name: InternPool.OptionalNullTerminatedString,
) FlushDeclError!void {
    const gpa = self.base.comp.gpa;
    try self.flushLazyFns(pt, mod, f, &av_block.ctype_pool, av_block.lazy_fns);
    try f.all_buffers.ensureUnusedCapacity(gpa, 1);
    // Avoid emitting extern decls that are already exported.
    if (extern_name.unwrap()) |name| if (export_names.contains(name)) return;
    f.appendBufAssumeCapacity(self.getString(if (exported_block) |exported|
        exported.fwd_decl
    else
        av_block.fwd_decl));
}

pub fn flushEmitH(zcu: *Zcu) !void {
    const tracy = trace(@src());
    defer tracy.end();

    if (true) return; // emit-h is regressed

    const emit_h = zcu.emit_h orelse return;

    // We collect a list of buffers to write, and write them all at once with pwritev 😎
    const num_buffers = emit_h.decl_table.count() + 1;
    var all_buffers = try std.array_list.Managed(std.posix.iovec_const).initCapacity(zcu.gpa, num_buffers);
    defer all_buffers.deinit();

    var file_size: u64 = zig_h.len;
    if (zig_h.len != 0) {
        all_buffers.appendAssumeCapacity(.{
            .base = zig_h,
            .len = zig_h.len,
        });
    }

    for (emit_h.decl_table.keys()) |decl_index| {
        const decl_emit_h = emit_h.declPtr(decl_index);
        const buf = decl_emit_h.fwd_decl.items;
        if (buf.len != 0) {
            all_buffers.appendAssumeCapacity(.{
                .base = buf.ptr,
                .len = buf.len,
            });
            file_size += buf.len;
        }
    }

    const directory = emit_h.loc.directory orelse zcu.comp.local_cache_directory;
    const file = try directory.handle.createFile(emit_h.loc.basename, .{
        // We set the end position explicitly below; by not truncating the file, we
        // possibly make it easier on the file system by doing one reallocation
        // instead of two.
        .truncate = false,
    });
    defer file.close();

    try file.setEndPos(file_size);
    try file.pwritevAll(all_buffers.items, 0);
}

pub fn updateExports(
    self: *C,
    pt: Zcu.PerThread,
    exported: Zcu.Exported,
    export_indices: []const Zcu.Export.Index,
) !void {
    const zcu = pt.zcu;
    const gpa = zcu.gpa;
    const mod, const pass: codegen.DeclGen.Pass, const decl_block, const exported_block = switch (exported) {
        .nav => |nav| .{
            zcu.navFileScope(nav).mod.?,
            .{ .nav = nav },
            self.navs.getPtr(nav).?,
            (try self.exported_navs.getOrPut(gpa, nav)).value_ptr,
        },
        .uav => |uav| .{
            zcu.root_mod,
            .{ .uav = uav },
            self.uavs.getPtr(uav).?,
            (try self.exported_uavs.getOrPut(gpa, uav)).value_ptr,
        },
    };
    const ctype_pool = &decl_block.ctype_pool;
    var dg: codegen.DeclGen = .{
        .gpa = gpa,
        .pt = pt,
        .mod = mod,
        .error_msg = null,
        .pass = pass,
        .is_naked_fn = false,
        .expected_block = null,
        .fwd_decl = undefined,
        .ctype_pool = decl_block.ctype_pool,
        .scratch = .initBuffer(self.scratch_buf),
        .uavs = .empty,
    };
    dg.fwd_decl = .initOwnedSlice(gpa, self.fwd_decl_buf);
    defer {
        assert(dg.uavs.count() == 0);
        ctype_pool.* = dg.ctype_pool.move();
        ctype_pool.freeUnusedCapacity(gpa);

        self.fwd_decl_buf = dg.fwd_decl.toArrayList().allocatedSlice();
        self.scratch_buf = dg.scratch.allocatedSlice();
    }
    codegen.genExports(&dg, exported, export_indices) catch |err| switch (err) {
        error.WriteFailed, error.OutOfMemory => return error.OutOfMemory,
    };
    exported_block.* = .{ .fwd_decl = try self.addString(dg.fwd_decl.written()) };
}

pub fn deleteExport(
    self: *C,
    exported: Zcu.Exported,
    _: InternPool.NullTerminatedString,
) void {
    switch (exported) {
        .nav => |nav| _ = self.exported_navs.swapRemove(nav),
        .uav => |uav| _ = self.exported_uavs.swapRemove(uav),
    }
}

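/// Merges the uavs discovered while lowering one decl into the linker-wide
/// tables: every uav gets an (initially empty) `AvBlock` in `c.uavs`, and
/// `c.aligned_uavs` keeps the strictest explicit alignment requested across
/// all uses.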
fn addUavsFromCodegen(c: *C, uavs: *const std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment)) Allocator.Error!void {
    const gpa = c.base.comp.gpa;
    try c.uavs.ensureUnusedCapacity(gpa, uavs.count());
    try c.aligned_uavs.ensureUnusedCapacity(gpa, uavs.count());
    for (uavs.keys(), uavs.values()) |uav_val, uav_align| {
        {
            const gop = c.uavs.getOrPutAssumeCapacity(uav_val);
            if (!gop.found_existing) gop.value_ptr.* = .{};
        }
        if (uav_align != .none) {
            const gop = c.aligned_uavs.getOrPutAssumeCapacity(uav_val);
            gop.value_ptr.* = if (gop.found_existing) max: {
                break :max gop.value_ptr.*.maxStrict(uav_align);
            } else uav_align;
        }
    }
}