1const builtin = @import("builtin");
2const native_endian = builtin.cpu.arch.endian();
3
4const std = @import("std");
5const Io = std.Io;
6const Allocator = std.mem.Allocator;
7const WORD = std.os.windows.WORD;
8const DWORD = std.os.windows.DWORD;
9
10const Node = @import("ast.zig").Node;
11const lex = @import("lex.zig");
12const Parser = @import("parse.zig").Parser;
13const ResourceType = @import("rc.zig").ResourceType;
14const Token = @import("lex.zig").Token;
15const literals = @import("literals.zig");
16const Number = literals.Number;
17const SourceBytes = literals.SourceBytes;
18const Diagnostics = @import("errors.zig").Diagnostics;
19const ErrorDetails = @import("errors.zig").ErrorDetails;
20const MemoryFlags = @import("res.zig").MemoryFlags;
21const rc = @import("rc.zig");
22const res = @import("res.zig");
23const ico = @import("ico.zig");
24const ani = @import("ani.zig");
25const bmp = @import("bmp.zig");
26const utils = @import("utils.zig");
27const NameOrOrdinal = res.NameOrOrdinal;
28const SupportedCodePage = @import("code_pages.zig").SupportedCodePage;
29const CodePageLookup = @import("ast.zig").CodePageLookup;
30const SourceMappings = @import("source_mapping.zig").SourceMappings;
31const windows1252 = @import("windows1252.zig");
32const lang = @import("lang.zig");
33const code_pages = @import("code_pages.zig");
34const errors = @import("errors.zig");
35
36pub const CompileOptions = struct {
37 cwd: std.fs.Dir,
38 diagnostics: *Diagnostics,
39 source_mappings: ?*SourceMappings = null,
40 /// List of paths (absolute or relative to `cwd`) for every file that the resources within the .rc file depend on.
41 dependencies: ?*Dependencies = null,
42 default_code_page: SupportedCodePage = .windows1252,
43 /// If true, the first #pragma code_page directive only sets the input code page, but not the output code page.
44 /// This check must be done before comments are removed from the file.
45 disjoint_code_page: bool = false,
46 ignore_include_env_var: bool = false,
47 extra_include_paths: []const []const u8 = &.{},
48 /// This is just an API convenience for separately passing 'system' include paths
49 /// (i.e. those that would normally come from the INCLUDE env var). This is mostly
50 /// intended for use when setting `ignore_include_env_var = true`. When `ignore_include_env_var`
51 /// is false, `system_include_paths` will be searched before the paths in the INCLUDE env var.
52 system_include_paths: []const []const u8 = &.{},
53 default_language_id: ?u16 = null,
54 // TODO: Implement verbose output
55 verbose: bool = false,
56 null_terminate_string_table_strings: bool = false,
57 /// Note: This is a u15 to ensure that the maximum number of UTF-16 code units
58 /// plus a null-terminator can always fit into a u16.
59 max_string_literal_codepoints: u15 = lex.default_max_string_literal_codepoints,
60 silent_duplicate_control_ids: bool = false,
61 warn_instead_of_error_on_invalid_code_page: bool = false,
62};
63
64pub const Dependencies = struct {
65 list: std.ArrayList([]const u8),
66 allocator: Allocator,
67
68 pub fn init(allocator: Allocator) Dependencies {
69 return .{
70 .list = .empty,
71 .allocator = allocator,
72 };
73 }
74
75 pub fn deinit(self: *Dependencies) void {
76 for (self.list.items) |item| {
77 self.allocator.free(item);
78 }
79 self.list.deinit(self.allocator);
80 }
81};
82
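/// Compiles .rc source text into binary .res data written to `writer`.
/// The overall flow: lex and parse `source` into an AST, build the list of search
/// directories used to find external files (the root .rc file's directory, the cwd,
/// extra/system include paths, and the INCLUDE env var paths unless ignored), then
/// walk the AST and write each resource.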
83pub fn compile(allocator: Allocator, io: Io, source: []const u8, writer: *std.Io.Writer, options: CompileOptions) !void {
84 var lexer = lex.Lexer.init(source, .{
85 .default_code_page = options.default_code_page,
86 .source_mappings = options.source_mappings,
87 .max_string_literal_codepoints = options.max_string_literal_codepoints,
88 });
89 var parser = Parser.init(&lexer, .{
90 .warn_instead_of_error_on_invalid_code_page = options.warn_instead_of_error_on_invalid_code_page,
91 .disjoint_code_page = options.disjoint_code_page,
92 });
93 var tree = try parser.parse(allocator, options.diagnostics);
94 defer tree.deinit();
95
96 var search_dirs: std.ArrayList(SearchDir) = .empty;
97 defer {
98 for (search_dirs.items) |*search_dir| {
99 search_dir.deinit(allocator);
100 }
101 search_dirs.deinit(allocator);
102 }
103
104 if (options.source_mappings) |source_mappings| {
105 const root_path = source_mappings.files.get(source_mappings.root_filename_offset);
106 // If dirname returns null, then the root path will be the same as
107 // the cwd so we don't need to add it as a distinct search path.
108 if (std.fs.path.dirname(root_path)) |root_dir_path| {
109 var root_dir = try options.cwd.openDir(root_dir_path, .{});
110 errdefer root_dir.close();
111 try search_dirs.append(allocator, .{ .dir = root_dir, .path = try allocator.dupe(u8, root_dir_path) });
112 }
113 }
114 // Re-open the passed in cwd since we want to be able to close it (std.fs.cwd() shouldn't be closed)
115 const cwd_dir = options.cwd.openDir(".", .{}) catch |err| {
116 try options.diagnostics.append(.{
117 .err = .failed_to_open_cwd,
118 .token = .{
119 .id = .invalid,
120 .start = 0,
121 .end = 0,
122 .line_number = 1,
123 },
124 .code_page = .utf8,
125 .print_source_line = false,
126 .extra = .{ .file_open_error = .{
127 .err = ErrorDetails.FileOpenError.enumFromError(err),
128 .filename_string_index = undefined,
129 } },
130 });
131 return error.CompileError;
132 };
133 try search_dirs.append(allocator, .{ .dir = cwd_dir, .path = null });
134 for (options.extra_include_paths) |extra_include_path| {
135 var dir = openSearchPathDir(options.cwd, extra_include_path) catch {
136 // TODO: maybe a warning that the search path is skipped?
137 continue;
138 };
139 errdefer dir.close();
140 try search_dirs.append(allocator, .{ .dir = dir, .path = try allocator.dupe(u8, extra_include_path) });
141 }
142 for (options.system_include_paths) |system_include_path| {
143 var dir = openSearchPathDir(options.cwd, system_include_path) catch {
144 // TODO: maybe a warning that the search path is skipped?
145 continue;
146 };
147 errdefer dir.close();
148 try search_dirs.append(allocator, .{ .dir = dir, .path = try allocator.dupe(u8, system_include_path) });
149 }
150 if (!options.ignore_include_env_var) {
151 const INCLUDE = std.process.getEnvVarOwned(allocator, "INCLUDE") catch "";
152 defer allocator.free(INCLUDE);
153
154 // The only precedent here is llvm-rc, which also uses the platform-specific
155 // delimiter. There's no precedent set by `rc.exe` since it's Windows-only.
156 const delimiter = switch (builtin.os.tag) {
157 .windows => ';',
158 else => ':',
159 };
160 var it = std.mem.tokenizeScalar(u8, INCLUDE, delimiter);
161 while (it.next()) |search_path| {
162 var dir = openSearchPathDir(options.cwd, search_path) catch continue;
163 errdefer dir.close();
164 try search_dirs.append(allocator, .{ .dir = dir, .path = try allocator.dupe(u8, search_path) });
165 }
166 }
167
168 var arena_allocator = std.heap.ArenaAllocator.init(allocator);
169 defer arena_allocator.deinit();
170 const arena = arena_allocator.allocator();
171
172 var compiler: Compiler = .{
173 .source = source,
174 .arena = arena,
175 .allocator = allocator,
176 .io = io,
177 .cwd = options.cwd,
178 .diagnostics = options.diagnostics,
179 .dependencies = options.dependencies,
180 .input_code_pages = &tree.input_code_pages,
181 .output_code_pages = &tree.output_code_pages,
182 // This is only safe because we know search_dirs won't be modified past this point
183 .search_dirs = search_dirs.items,
184 .null_terminate_string_table_strings = options.null_terminate_string_table_strings,
185 .silent_duplicate_control_ids = options.silent_duplicate_control_ids,
186 };
187 if (options.default_language_id) |default_language_id| {
188 compiler.state.language = res.Language.fromInt(default_language_id);
189 }
190
191 try compiler.writeRoot(tree.root(), writer);
192}
193
194pub const Compiler = struct {
195 source: []const u8,
196 arena: Allocator,
197 allocator: Allocator,
198 io: Io,
199 cwd: std.fs.Dir,
200 state: State = .{},
201 diagnostics: *Diagnostics,
202 dependencies: ?*Dependencies,
203 input_code_pages: *const CodePageLookup,
204 output_code_pages: *const CodePageLookup,
205 search_dirs: []SearchDir,
206 null_terminate_string_table_strings: bool,
207 silent_duplicate_control_ids: bool,
208
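 /// Mutable state accumulated while writing resources: the next auto-assigned
 /// icon/cursor resource ID, pending STRINGTABLE and FONTDIR data, and the
 /// current language/version/characteristics values.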
209 pub const State = struct {
210 icon_id: u16 = 1,
211 string_tables: StringTablesByLanguage = .{},
212 language: res.Language = .{},
213 font_dir: FontDir = .{},
214 version: u32 = 0,
215 characteristics: u32 = 0,
216 };
217
218 pub fn writeRoot(self: *Compiler, root: *Node.Root, writer: *std.Io.Writer) !void {
219 try writeEmptyResource(writer);
220 for (root.body) |node| {
221 try self.writeNode(node, writer);
222 }
223
224 // now write the FONTDIR (if it has anything in it)
225 try self.state.font_dir.writeResData(self, writer);
226 if (self.state.font_dir.fonts.items.len != 0) {
227 // The Win32 RC compiler may write a different FONTDIR resource than us,
228 // due to it sometimes writing a non-zero-length device name/face name
229 // whereas we *always* write them both as zero-length.
230 //
231 // In practical terms, this doesn't matter, since for various reasons the format
232 // of the FONTDIR cannot be relied on and is seemingly not actually used by anything
233 // anymore. We still want to emit some sort of diagnostic so that it's possible
234 // to know that our .RES is intentionally not byte-for-byte identical with
235 // the rc.exe output.
236 //
237 // By using the hint type here, we allow this diagnostic to be detected in code,
238 // but it will not be printed since the end-user doesn't need to care.
239 try self.addErrorDetails(.{
240 .err = .result_contains_fontdir,
241 .type = .hint,
242 .token = .{
243 .id = .invalid,
244 .start = 0,
245 .end = 0,
246 .line_number = 1,
247 },
248 });
249 }
250 // once we've written everything else out, we can write out the finalized STRINGTABLE resources
251 var string_tables_it = self.state.string_tables.tables.iterator();
252 while (string_tables_it.next()) |string_table_entry| {
253 var string_table_it = string_table_entry.value_ptr.blocks.iterator();
254 while (string_table_it.next()) |entry| {
255 try entry.value_ptr.writeResData(self, string_table_entry.key_ptr.*, entry.key_ptr.*, writer);
256 }
257 }
258 }
259
260 pub fn writeNode(self: *Compiler, node: *Node, writer: *std.Io.Writer) !void {
261 switch (node.id) {
262 .root => unreachable, // writeRoot should be called directly instead
263 .resource_external => try self.writeResourceExternal(@alignCast(@fieldParentPtr("base", node)), writer),
264 .resource_raw_data => try self.writeResourceRawData(@alignCast(@fieldParentPtr("base", node)), writer),
265 .literal => unreachable, // this is context dependent and should be handled by its parent
266 .binary_expression => unreachable,
267 .grouped_expression => unreachable,
268 .not_expression => unreachable,
269 .invalid => {}, // no-op, currently only used for dangling literals at EOF
270 .accelerators => try self.writeAccelerators(@alignCast(@fieldParentPtr("base", node)), writer),
271 .accelerator => unreachable, // handled by writeAccelerators
272 .dialog => try self.writeDialog(@alignCast(@fieldParentPtr("base", node)), writer),
273 .control_statement => unreachable,
274 .toolbar => try self.writeToolbar(@alignCast(@fieldParentPtr("base", node)), writer),
275 .menu => try self.writeMenu(@alignCast(@fieldParentPtr("base", node)), writer),
276 .menu_item => unreachable,
277 .menu_item_separator => unreachable,
278 .menu_item_ex => unreachable,
279 .popup => unreachable,
280 .popup_ex => unreachable,
281 .version_info => try self.writeVersionInfo(@alignCast(@fieldParentPtr("base", node)), writer),
282 .version_statement => unreachable,
283 .block => unreachable,
284 .block_value => unreachable,
285 .block_value_value => unreachable,
286 .string_table => try self.writeStringTable(@alignCast(@fieldParentPtr("base", node))),
287 .string_table_string => unreachable, // handled by writeStringTable
288 .language_statement => self.writeLanguageStatement(@alignCast(@fieldParentPtr("base", node))),
289 .font_statement => unreachable,
290 .simple_statement => self.writeTopLevelSimpleStatement(@alignCast(@fieldParentPtr("base", node))),
291 }
292 }
293
294 /// Returns the filename encoded as UTF-8 (allocated by self.allocator)
295 pub fn evaluateFilenameExpression(self: *Compiler, expression_node: *Node) ![]u8 {
296 switch (expression_node.id) {
297 .literal => {
298 const literal_node = expression_node.cast(.literal).?;
299 switch (literal_node.token.id) {
300 .literal, .number => {
301 const slice = literal_node.token.slice(self.source);
302 const code_page = self.input_code_pages.getForToken(literal_node.token);
303 var buf = try std.ArrayList(u8).initCapacity(self.allocator, slice.len);
304 errdefer buf.deinit(self.allocator);
305
306 var index: usize = 0;
307 while (code_page.codepointAt(index, slice)) |codepoint| : (index += codepoint.byte_len) {
308 const c = codepoint.value;
309 if (c == code_pages.Codepoint.invalid) {
310 try buf.appendSlice(self.allocator, "�");
311 } else {
312 // Anything that is not returned as an invalid codepoint must be encodable as UTF-8.
313 const utf8_len = std.unicode.utf8CodepointSequenceLength(c) catch unreachable;
314 try buf.ensureUnusedCapacity(self.allocator, utf8_len);
315 _ = std.unicode.utf8Encode(c, buf.unusedCapacitySlice()) catch unreachable;
316 buf.items.len += utf8_len;
317 }
318 }
319
320 return buf.toOwnedSlice(self.allocator);
321 },
322 .quoted_ascii_string, .quoted_wide_string => {
323 const slice = literal_node.token.slice(self.source);
324 const column = literal_node.token.calculateColumn(self.source, 8, null);
325 const bytes = SourceBytes{ .slice = slice, .code_page = self.input_code_pages.getForToken(literal_node.token) };
326
327 var buf: std.ArrayList(u8) = .empty;
328 errdefer buf.deinit(self.allocator);
329
330 // Filenames are sort-of parsed as if they were wide strings, but the max escape width of
331 // hex/octal escapes is still determined by the L prefix. Since we want to end up with
332 // UTF-8, we can parse either string type directly to UTF-8.
333 var parser = literals.IterativeStringParser.init(bytes, .{
334 .start_column = column,
335 .diagnostics = self.errContext(literal_node.token),
336 // TODO: Re-evaluate this. It's not been tested whether or not using the actual
337 // output code page would make more sense.
338 .output_code_page = .windows1252,
339 });
340
341 while (try parser.nextUnchecked()) |parsed| {
342 const c = parsed.codepoint;
343 if (c == code_pages.Codepoint.invalid) {
344 try buf.appendSlice(self.allocator, "�");
345 } else {
346 var codepoint_buf: [4]u8 = undefined;
347 // If the codepoint cannot be encoded, we fall back to �
348 if (std.unicode.utf8Encode(c, &codepoint_buf)) |len| {
349 try buf.appendSlice(self.allocator, codepoint_buf[0..len]);
350 } else |_| {
351 try buf.appendSlice(self.allocator, "�");
352 }
353 }
354 }
355
356 return buf.toOwnedSlice(self.allocator);
357 },
358 else => unreachable, // no other token types should be in a filename literal node
359 }
360 },
361 .binary_expression => {
362 const binary_expression_node = expression_node.cast(.binary_expression).?;
363 return self.evaluateFilenameExpression(binary_expression_node.right);
364 },
365 .grouped_expression => {
366 const grouped_expression_node = expression_node.cast(.grouped_expression).?;
367 return self.evaluateFilenameExpression(grouped_expression_node.expression);
368 },
369 else => unreachable,
370 }
371 }
372
373 /// https://learn.microsoft.com/en-us/windows/win32/menurc/searching-for-files
374 ///
375 /// Searches, in this order:
376 /// Directory of the 'root' .rc file (if different from CWD)
377 /// CWD
378 /// extra_include_paths (resolved relative to CWD)
379 /// system_include_paths (resolved relative to CWD)
380 /// INCLUDE environment var paths (only if ignore_include_env_var is false; resolved relative to CWD)
381 ///
382 /// Note: The CWD being searched *in addition to* the directory of the 'root' .rc file
383 /// is also how the Win32 RC compiler preprocessor searches for includes, but that
384 /// differs from how the clang preprocessor searches for includes.
385 ///
386 /// Note: This will always return the first matching file that can be opened.
387 /// This matches the Win32 RC compiler, which will fail with an error if the first
388 /// matching file is invalid. That is, it does not emulate the `cmd`-style PATH search
389 /// behavior of continuing to look past an invalid match until a valid
390 /// file is found.
391 fn searchForFile(self: *Compiler, path: []const u8) !std.fs.File {
392 // If the path is absolute, then it is not resolved relative to any search
393 // paths, so there's no point in checking them.
394 //
395 // This behavior was determined/confirmed with the following test:
396 // - A `test.rc` file with the contents `1 RCDATA "/test.bin"`
397 // - A `test.bin` file at `C:\test.bin`
398 // - A `test.bin` file at `inc\test.bin` relative to the .rc file
399 // - Invoking `rc` with `rc /i inc test.rc`
400 //
401 // This results in a .res file with the contents of `C:\test.bin`, not
402 // the contents of `inc\test.bin`. Further, if `C:\test.bin` is deleted,
403 // then it starts failing to find `/test.bin`, meaning that it does not resolve
404 // `/test.bin` relative to include paths and instead only treats it as
405 // an absolute path.
406 if (std.fs.path.isAbsolute(path)) {
407 const file = try utils.openFileNotDir(std.fs.cwd(), path, .{});
408 errdefer file.close();
409
410 if (self.dependencies) |dependencies| {
411 const duped_path = try dependencies.allocator.dupe(u8, path);
412 errdefer dependencies.allocator.free(duped_path);
413 try dependencies.list.append(dependencies.allocator, duped_path);
414 }
 return file;
415 }
416
417 var first_error: ?(std.fs.File.OpenError || std.fs.File.StatError) = null;
418 for (self.search_dirs) |search_dir| {
419 if (utils.openFileNotDir(search_dir.dir, path, .{})) |file| {
420 errdefer file.close();
421
422 if (self.dependencies) |dependencies| {
423 const searched_file_path = try std.fs.path.join(dependencies.allocator, &.{
424 search_dir.path orelse "", path,
425 });
426 errdefer dependencies.allocator.free(searched_file_path);
427 try dependencies.list.append(dependencies.allocator, searched_file_path);
428 }
429
430 return file;
431 } else |err| if (first_error == null) {
432 first_error = err;
433 }
434 }
435 return first_error orelse error.FileNotFound;
436 }
437
438 /// Returns a Windows-1252 encoded string regardless of the current output code page.
439 /// All codepoints are encoded as a maximum of 2 bytes, where unescaped codepoints
440 /// >= 0x10000 are encoded as `??` and everything else is encoded as 1 byte.
441 pub fn parseDlgIncludeString(self: *Compiler, token: Token) ![]u8 {
442 const bytes = self.sourceBytesForToken(token);
443 const output_code_page = self.output_code_pages.getForToken(token);
444
445 var buf = try std.ArrayList(u8).initCapacity(self.allocator, bytes.slice.len);
446 errdefer buf.deinit(self.allocator);
447
448 var iterative_parser = literals.IterativeStringParser.init(bytes, .{
449 .start_column = token.calculateColumn(self.source, 8, null),
450 .diagnostics = self.errContext(token),
451 // TODO: Potentially re-evaluate this, it's not been tested whether or not
452 // using the actual output code page would make more sense.
453 .output_code_page = .windows1252,
454 });
455
456 // This is similar to the logic in parseQuotedString, but ends up with everything
457 // encoded as Windows-1252. This effectively consolidates the two-step process
458 // of rc.exe into one step, since rc.exe's preprocessor converts to UTF-16 (this
459 // is when invalid sequences are replaced by the replacement character (U+FFFD)),
460 // and then that's run through the parser. Our preprocessor keeps things in their
461 // original encoding, meaning we emulate the <encoding> -> UTF-16 -> Windows-1252
462 // results all at once.
463 while (try iterative_parser.next()) |parsed| {
464 const c = parsed.codepoint;
465 switch (iterative_parser.declared_string_type) {
466 .wide => {
467 if (windows1252.bestFitFromCodepoint(c)) |best_fit| {
468 try buf.append(self.allocator, best_fit);
469 } else if (c < 0x10000 or c == code_pages.Codepoint.invalid or parsed.escaped_surrogate_pair) {
470 try buf.append(self.allocator, '?');
471 } else {
472 try buf.appendSlice(self.allocator, "??");
473 }
474 },
475 .ascii => {
476 if (parsed.from_escaped_integer) {
477 const truncated: u8 = @truncate(c);
478 switch (output_code_page) {
479 .utf8 => switch (truncated) {
480 0...0x7F => try buf.append(self.allocator, truncated),
481 else => try buf.append(self.allocator, '?'),
482 },
483 .windows1252 => {
484 try buf.append(self.allocator, truncated);
485 },
486 }
487 } else {
488 if (windows1252.bestFitFromCodepoint(c)) |best_fit| {
489 try buf.append(self.allocator, best_fit);
490 } else if (c < 0x10000 or c == code_pages.Codepoint.invalid) {
491 try buf.append(self.allocator, '?');
492 } else {
493 try buf.appendSlice(self.allocator, "??");
494 }
495 }
496 },
497 }
498 }
499
500 return buf.toOwnedSlice(self.allocator);
501 }
502
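 /// Writes a resource whose data comes from an external file. Predefined resource
 /// types (icons/cursors, bitmaps, fonts, DLGINCLUDE) get format-specific handling
 /// below; all other types fall through to writing the file's contents verbatim.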
503 pub fn writeResourceExternal(self: *Compiler, node: *Node.ResourceExternal, writer: *std.Io.Writer) !void {
504 const io = self.io;
505
506 // Init header with data size zero for now, will need to fill it in later
507 var header = try self.resourceHeader(node.id, node.type, .{});
508 defer header.deinit(self.allocator);
509
510 const maybe_predefined_type = header.predefinedResourceType();
511
512 // DLGINCLUDE has special handling that doesn't actually need the file to exist
513 if (maybe_predefined_type != null and maybe_predefined_type.? == .DLGINCLUDE) {
514 const filename_token = node.filename.cast(.literal).?.token;
515 const parsed_filename = try self.parseDlgIncludeString(filename_token);
516 defer self.allocator.free(parsed_filename);
517
518 // NUL within the parsed string acts as a terminator
519 const parsed_filename_terminated = std.mem.sliceTo(parsed_filename, 0);
520
521 header.applyMemoryFlags(node.common_resource_attributes, self.source);
522 // This is effectively limited by `max_string_literal_codepoints` which is a u15.
523 // Each codepoint within a DLGINCLUDE string is encoded as a maximum of
524 // 2 bytes, which means that the maximum byte length of a DLGINCLUDE string is
525 // (including the NUL terminator): 32,767 * 2 + 1 = 65,535 or exactly the u16 max.
526 header.data_size = @intCast(parsed_filename_terminated.len + 1);
527 try header.write(writer, self.errContext(node.id));
528 try writer.writeAll(parsed_filename_terminated);
529 try writer.writeByte(0);
530 try writeDataPadding(writer, header.data_size);
531 return;
532 }
533
534 const filename_utf8 = try self.evaluateFilenameExpression(node.filename);
535 defer self.allocator.free(filename_utf8);
536
537 // TODO: More robust checking of the validity of the filename.
538 // This currently only checks for NUL bytes, but it should probably also check for
539 // platform-specific invalid characters like '*', '?', '"', '<', '>', '|' (Windows)
540 // Related: https://github.com/ziglang/zig/pull/14533#issuecomment-1416888193
541 if (std.mem.indexOfScalar(u8, filename_utf8, 0) != null) {
542 return self.addErrorDetailsAndFail(.{
543 .err = .invalid_filename,
544 .token = node.filename.getFirstToken(),
545 .token_span_end = node.filename.getLastToken(),
546 .extra = .{ .number = 0 },
547 });
548 }
549
550 // Allow plain number literals, but complex number expressions are evaluated strangely
551 // and almost certainly lead to things not intended by the user (e.g. '(1+-1)' evaluates
552 // to the filename '-1'), so error if the filename node is a grouped/binary expression.
553 // Note: This is done here instead of during parsing so that we can easily include
554 // the evaluated filename as part of the error messages.
555 if (node.filename.id != .literal) {
556 const filename_string_index = try self.diagnostics.putString(filename_utf8);
557 try self.addErrorDetails(.{
558 .err = .number_expression_as_filename,
559 .token = node.filename.getFirstToken(),
560 .token_span_end = node.filename.getLastToken(),
561 .extra = .{ .number = filename_string_index },
562 });
563 return self.addErrorDetailsAndFail(.{
564 .err = .number_expression_as_filename,
565 .type = .note,
566 .token = node.filename.getFirstToken(),
567 .token_span_end = node.filename.getLastToken(),
568 .print_source_line = false,
569 .extra = .{ .number = filename_string_index },
570 });
571 }
572 // From here on out, we know that the filename must consist of a single token,
573 // so get it here to simplify future usage.
574 const filename_token = node.filename.getFirstToken();
575
576 const file_handle = self.searchForFile(filename_utf8) catch |err| switch (err) {
577 error.OutOfMemory => |e| return e,
578 else => |e| {
579 const filename_string_index = try self.diagnostics.putString(filename_utf8);
580 return self.addErrorDetailsAndFail(.{
581 .err = .file_open_error,
582 .token = filename_token,
583 .extra = .{ .file_open_error = .{
584 .err = ErrorDetails.FileOpenError.enumFromError(e),
585 .filename_string_index = filename_string_index,
586 } },
587 });
588 },
589 };
590 defer file_handle.close();
591 var file_buffer: [2048]u8 = undefined;
592 var file_reader = file_handle.reader(io, &file_buffer);
593
594 if (maybe_predefined_type) |predefined_type| {
595 switch (predefined_type) {
596 .GROUP_ICON, .GROUP_CURSOR => {
597 // Check for animated icon first
598 if (ani.isAnimatedIcon(&file_reader.interface)) {
599 // Animated icons are just put into the resource unmodified,
600 // and the resource type changes to ANIICON/ANICURSOR
601
602 const new_predefined_type: res.RT = switch (predefined_type) {
603 .GROUP_ICON => .ANIICON,
604 .GROUP_CURSOR => .ANICURSOR,
605 else => unreachable,
606 };
607 header.type_value.ordinal = @intFromEnum(new_predefined_type);
608 header.memory_flags = MemoryFlags.defaults(new_predefined_type);
609 header.applyMemoryFlags(node.common_resource_attributes, self.source);
610 header.data_size = std.math.cast(u32, try file_reader.getSize()) orelse {
611 return self.addErrorDetailsAndFail(.{
612 .err = .resource_data_size_exceeds_max,
613 .token = node.id,
614 });
615 };
616
617 try header.write(writer, self.errContext(node.id));
618 try file_reader.seekTo(0);
619 try writeResourceData(writer, &file_reader.interface, header.data_size);
620 return;
621 }
622
623 // isAnimatedIcon moved the file cursor, so reset it to the start
624 try file_reader.seekTo(0);
625
626 const icon_dir = ico.read(self.allocator, &file_reader.interface, try file_reader.getSize()) catch |err| switch (err) {
627 error.OutOfMemory => |e| return e,
628 else => |e| {
629 return self.iconReadError(
630 e,
631 filename_utf8,
632 filename_token,
633 predefined_type,
634 );
635 },
636 };
637 defer icon_dir.deinit();
638
639 // This limit is inherent to the ico format since the number of entries is a u16 field.
640 std.debug.assert(icon_dir.entries.len <= std.math.maxInt(u16));
641
642 // Note: The Win32 RC compiler will compile the resource as whatever type is
643 // in the icon_dir regardless of the type of resource specified in the .rc.
644 // This leads to unusable .res files when the types mismatch, so
645 // we error instead.
646 const res_types_match = switch (predefined_type) {
647 .GROUP_ICON => icon_dir.image_type == .icon,
648 .GROUP_CURSOR => icon_dir.image_type == .cursor,
649 else => unreachable,
650 };
651 if (!res_types_match) {
652 return self.addErrorDetailsAndFail(.{
653 .err = .icon_dir_and_resource_type_mismatch,
654 .token = filename_token,
655 .extra = .{ .resource = switch (predefined_type) {
656 .GROUP_ICON => .icon,
657 .GROUP_CURSOR => .cursor,
658 else => unreachable,
659 } },
660 });
661 }
662
663 // Memory flags affect the RT_ICON and the RT_GROUP_ICON differently
664 var icon_memory_flags = MemoryFlags.defaults(res.RT.ICON);
665 applyToMemoryFlags(&icon_memory_flags, node.common_resource_attributes, self.source);
666 applyToGroupMemoryFlags(&header.memory_flags, node.common_resource_attributes, self.source);
667
668 const first_icon_id = self.state.icon_id;
669 const entry_type = if (predefined_type == .GROUP_ICON) @intFromEnum(res.RT.ICON) else @intFromEnum(res.RT.CURSOR);
670 for (icon_dir.entries, 0..) |*entry, entry_i_usize| {
671 // We know that the entry index must fit within a u16, so
672 // cast it here to simplify usage sites.
673 const entry_i: u16 = @intCast(entry_i_usize);
674 var full_data_size = entry.data_size_in_bytes;
675 if (icon_dir.image_type == .cursor) {
676 full_data_size = std.math.add(u32, full_data_size, 4) catch {
677 return self.addErrorDetailsAndFail(.{
678 .err = .resource_data_size_exceeds_max,
679 .token = node.id,
680 });
681 };
682 }
683
684 const image_header = ResourceHeader{
685 .type_value = .{ .ordinal = entry_type },
686 .name_value = .{ .ordinal = self.state.icon_id },
687 .data_size = full_data_size,
688 .memory_flags = icon_memory_flags,
689 .language = self.state.language,
690 .version = self.state.version,
691 .characteristics = self.state.characteristics,
692 };
693 try image_header.write(writer, self.errContext(node.id));
694
695 // From https://learn.microsoft.com/en-us/windows/win32/menurc/localheader:
696 // > The LOCALHEADER structure is the first data written to the RT_CURSOR
697 // > resource if a RESDIR structure contains information about a cursor.
698 // where LOCALHEADER is `struct { WORD xHotSpot; WORD yHotSpot; }`
699 if (icon_dir.image_type == .cursor) {
700 try writer.writeInt(u16, entry.type_specific_data.cursor.hotspot_x, .little);
701 try writer.writeInt(u16, entry.type_specific_data.cursor.hotspot_y, .little);
702 }
703
704 try file_reader.seekTo(entry.data_offset_from_start_of_file);
705 var header_bytes: [16]u8 align(@alignOf(ico.BitmapHeader)) = (file_reader.interface.takeArray(16) catch {
706 return self.iconReadError(
707 error.UnexpectedEOF,
708 filename_utf8,
709 filename_token,
710 predefined_type,
711 );
712 }).*;
713
714 const image_format = ico.ImageFormat.detect(&header_bytes);
715 if (!image_format.validate(&header_bytes)) {
716 return self.iconReadError(
717 error.InvalidHeader,
718 filename_utf8,
719 filename_token,
720 predefined_type,
721 );
722 }
723 switch (image_format) {
724 .riff => switch (icon_dir.image_type) {
725 .icon => {
726 // The Win32 RC compiler treats this as an error, but icon dirs
727 // with RIFF encoded icons within them work ~okay (they work
728 // in some places but not others, they may not animate, etc) if they are
729 // allowed to be compiled.
730 try self.addErrorDetails(.{
731 .err = .rc_would_error_on_icon_dir,
732 .type = .warning,
733 .token = filename_token,
734 .extra = .{ .icon_dir = .{ .icon_type = .icon, .icon_format = .riff, .index = entry_i } },
735 });
736 try self.addErrorDetails(.{
737 .err = .rc_would_error_on_icon_dir,
738 .type = .note,
739 .print_source_line = false,
740 .token = filename_token,
741 .extra = .{ .icon_dir = .{ .icon_type = .icon, .icon_format = .riff, .index = entry_i } },
742 });
743 },
744 .cursor => {
745 // The Win32 RC compiler errors in this case too, but we only error
746 // here because the cursor would fail to be loaded at runtime if we
747 // compiled it.
748 return self.addErrorDetailsAndFail(.{
749 .err = .format_not_supported_in_icon_dir,
750 .token = filename_token,
751 .extra = .{ .icon_dir = .{ .icon_type = .cursor, .icon_format = .riff, .index = entry_i } },
752 });
753 },
754 },
755 .png => switch (icon_dir.image_type) {
756 .icon => {
757 // PNG always seems to have 1 for color planes no matter what
758 entry.type_specific_data.icon.color_planes = 1;
759 // These seem to be the only values of num_colors that
760 // get treated specially
761 entry.type_specific_data.icon.bits_per_pixel = switch (entry.num_colors) {
762 2 => 1,
763 8 => 3,
764 16 => 4,
765 else => entry.type_specific_data.icon.bits_per_pixel,
766 };
767 },
768 .cursor => {
769 // The Win32 RC compiler treats this as an error, but cursor dirs
770 // with PNG encoded icons within them work fine if they are
771 // allowed to be compiled.
772 try self.addErrorDetails(.{
773 .err = .rc_would_error_on_icon_dir,
774 .type = .warning,
775 .token = filename_token,
776 .extra = .{ .icon_dir = .{ .icon_type = .cursor, .icon_format = .png, .index = entry_i } },
777 });
778 },
779 },
780 .dib => {
781 const bitmap_header: *ico.BitmapHeader = @ptrCast(@alignCast(&header_bytes));
782 if (native_endian == .big) {
783 std.mem.byteSwapAllFields(ico.BitmapHeader, bitmap_header);
784 }
785 const bitmap_version = ico.BitmapHeader.Version.get(bitmap_header.bcSize);
786
787 // The Win32 RC compiler only allows headers with
788 // `bcSize == sizeof(BITMAPINFOHEADER)`, but it seems unlikely
789 // that there's a good reason for that outside of too-old
790 // bitmap headers.
791 // TODO: Need to test V4 and V5 bitmaps to check they actually work
792 if (bitmap_version == .@"win2.0") {
793 return self.addErrorDetailsAndFail(.{
794 .err = .rc_would_error_on_bitmap_version,
795 .token = filename_token,
796 .extra = .{ .icon_dir = .{
797 .icon_type = if (icon_dir.image_type == .icon) .icon else .cursor,
798 .icon_format = image_format,
799 .index = entry_i,
800 .bitmap_version = bitmap_version,
801 } },
802 });
803 } else if (bitmap_version != .@"nt3.1") {
804 try self.addErrorDetails(.{
805 .err = .rc_would_error_on_bitmap_version,
806 .type = .warning,
807 .token = filename_token,
808 .extra = .{ .icon_dir = .{
809 .icon_type = if (icon_dir.image_type == .icon) .icon else .cursor,
810 .icon_format = image_format,
811 .index = entry_i,
812 .bitmap_version = bitmap_version,
813 } },
814 });
815 }
816
817 switch (icon_dir.image_type) {
818 .icon => {
819 // The values in the icon's BITMAPINFOHEADER always take precedence over
820 // the values in the IconDir, but not in the LOCALHEADER (see above).
821 entry.type_specific_data.icon.color_planes = bitmap_header.bcPlanes;
822 entry.type_specific_data.icon.bits_per_pixel = bitmap_header.bcBitCount;
823 },
824 .cursor => {
825 // Only cursors get the width/height from BITMAPINFOHEADER (icons don't)
826 entry.width = @intCast(bitmap_header.bcWidth);
827 entry.height = @intCast(bitmap_header.bcHeight);
828 entry.type_specific_data.cursor.hotspot_x = bitmap_header.bcPlanes;
829 entry.type_specific_data.cursor.hotspot_y = bitmap_header.bcBitCount;
830 },
831 }
832 },
833 }
834
835 try file_reader.seekTo(entry.data_offset_from_start_of_file);
836 try writeResourceDataNoPadding(writer, &file_reader.interface, entry.data_size_in_bytes);
837 try writeDataPadding(writer, full_data_size);
838
839 if (self.state.icon_id == std.math.maxInt(u16)) {
840 try self.addErrorDetails(.{
841 .err = .max_icon_ids_exhausted,
842 .print_source_line = false,
843 .token = filename_token,
844 .extra = .{ .icon_dir = .{
845 .icon_type = if (icon_dir.image_type == .icon) .icon else .cursor,
846 .icon_format = image_format,
847 .index = entry_i,
848 } },
849 });
850 return self.addErrorDetailsAndFail(.{
851 .err = .max_icon_ids_exhausted,
852 .type = .note,
853 .token = filename_token,
854 .extra = .{ .icon_dir = .{
855 .icon_type = if (icon_dir.image_type == .icon) .icon else .cursor,
856 .icon_format = image_format,
857 .index = entry_i,
858 } },
859 });
860 }
861 self.state.icon_id += 1;
862 }
863
864 header.data_size = icon_dir.getResDataSize();
865
866 try header.write(writer, self.errContext(node.id));
867 try icon_dir.writeResData(writer, first_icon_id);
868 try writeDataPadding(writer, header.data_size);
869 return;
870 },
871 .RCDATA,
872 .HTML,
873 .MESSAGETABLE,
874 .DLGINIT,
875 .PLUGPLAY,
876 .VXD,
877 // Note: All of the below can only be specified by using a number
878 // as the resource type.
879 .MANIFEST,
880 .CURSOR,
881 .ICON,
882 .ANICURSOR,
883 .ANIICON,
884 .FONTDIR,
885 => {
886 header.applyMemoryFlags(node.common_resource_attributes, self.source);
887 },
888 .BITMAP => {
889 header.applyMemoryFlags(node.common_resource_attributes, self.source);
890 const file_size = try file_reader.getSize();
891
892 const bitmap_info = bmp.read(&file_reader.interface, file_size) catch |err| {
893 const filename_string_index = try self.diagnostics.putString(filename_utf8);
894 return self.addErrorDetailsAndFail(.{
895 .err = .bmp_read_error,
896 .token = filename_token,
897 .extra = .{ .bmp_read_error = .{
898 .err = ErrorDetails.BitmapReadError.enumFromError(err),
899 .filename_string_index = filename_string_index,
900 } },
901 });
902 };
903
904 if (bitmap_info.getActualPaletteByteLen() > bitmap_info.getExpectedPaletteByteLen()) {
905 const num_ignored_bytes = bitmap_info.getActualPaletteByteLen() - bitmap_info.getExpectedPaletteByteLen();
906 var number_as_bytes: [8]u8 = undefined;
907 std.mem.writeInt(u64, &number_as_bytes, num_ignored_bytes, native_endian);
908 const value_string_index = try self.diagnostics.putString(&number_as_bytes);
909 try self.addErrorDetails(.{
910 .err = .bmp_ignored_palette_bytes,
911 .type = .warning,
912 .token = filename_token,
913 .extra = .{ .number = value_string_index },
914 });
915 } else if (bitmap_info.getActualPaletteByteLen() < bitmap_info.getExpectedPaletteByteLen()) {
916 const num_padding_bytes = bitmap_info.getExpectedPaletteByteLen() - bitmap_info.getActualPaletteByteLen();
917
918 var number_as_bytes: [8]u8 = undefined;
919 std.mem.writeInt(u64, &number_as_bytes, num_padding_bytes, native_endian);
920 const value_string_index = try self.diagnostics.putString(&number_as_bytes);
921 try self.addErrorDetails(.{
922 .err = .bmp_missing_palette_bytes,
923 .type = .err,
924 .token = filename_token,
925 .extra = .{ .number = value_string_index },
926 });
927 const pixel_data_len = bitmap_info.getPixelDataLen(file_size);
928 // TODO: This is a hack, but we know we have already added
929 // at least one entry to the diagnostics strings, so we can
930 // get away with using 0 to mean 'no string' here.
931 var miscompiled_bytes_string_index: u32 = 0;
932 if (pixel_data_len > 0) {
933 const miscompiled_bytes = @min(pixel_data_len, num_padding_bytes);
934 std.mem.writeInt(u64, &number_as_bytes, miscompiled_bytes, native_endian);
935 miscompiled_bytes_string_index = try self.diagnostics.putString(&number_as_bytes);
936 }
937 return self.addErrorDetailsAndFail(.{
938 .err = .rc_would_miscompile_bmp_palette_padding,
939 .type = .note,
940 .print_source_line = false,
941 .token = filename_token,
942 .extra = .{ .number = miscompiled_bytes_string_index },
943 });
944 }
945
946 // TODO: It might be possible that the calculation done in this function
947 // could underflow if the underlying file is modified while reading
948 // it, but we need to think about it more to determine if that's a
949 // real possibility
950 const bmp_bytes_to_write: u32 = @intCast(bitmap_info.getExpectedByteLen(file_size));
951
952 header.data_size = bmp_bytes_to_write;
953 try header.write(writer, self.errContext(node.id));
954 try file_reader.seekTo(bmp.file_header_len);
955 try writeResourceDataNoPadding(writer, &file_reader.interface, bitmap_info.dib_header_size);
956 if (bitmap_info.getBitmasksByteLen() > 0) {
957 try writeResourceDataNoPadding(writer, &file_reader.interface, bitmap_info.getBitmasksByteLen());
958 }
959 if (bitmap_info.getExpectedPaletteByteLen() > 0) {
960 try writeResourceDataNoPadding(writer, &file_reader.interface, @intCast(bitmap_info.getActualPaletteByteLen()));
961 }
962 try file_reader.seekTo(bitmap_info.pixel_data_offset);
963 const pixel_bytes: u32 = @intCast(file_size - bitmap_info.pixel_data_offset);
964 try writeResourceDataNoPadding(writer, &file_reader.interface, pixel_bytes);
965 try writeDataPadding(writer, bmp_bytes_to_write);
966 return;
967 },
968 .FONT => {
969 if (self.state.font_dir.ids.get(header.name_value.ordinal) != null) {
970 // Add warning and skip this resource
971 // Note: The Win32 compiler prints this as an error but it doesn't fail the compilation
972 // and the duplicate resource is skipped.
973 try self.addErrorDetails(.{
974 .err = .font_id_already_defined,
975 .token = node.id,
976 .type = .warning,
977 .extra = .{ .number = header.name_value.ordinal },
978 });
979 try self.addErrorDetails(.{
980 .err = .font_id_already_defined,
981 .token = self.state.font_dir.ids.get(header.name_value.ordinal).?,
982 .type = .note,
983 .extra = .{ .number = header.name_value.ordinal },
984 });
985 return;
986 }
987 header.applyMemoryFlags(node.common_resource_attributes, self.source);
988 const file_size = try file_reader.getSize();
989 if (file_size > std.math.maxInt(u32)) {
990 return self.addErrorDetailsAndFail(.{
991 .err = .resource_data_size_exceeds_max,
992 .token = node.id,
993 });
994 }
995
996 // We now know that the data size will fit in a u32
997 header.data_size = @intCast(file_size);
998 try header.write(writer, self.errContext(node.id));
999
1000 // Slurp the first 148 bytes separately so we can store them in the FontDir
1001 var font_dir_header_buf: [148]u8 = @splat(0);
1002 const populated_len: u32 = @intCast(try file_reader.interface.readSliceShort(&font_dir_header_buf));
1003
1004 // Write only the populated bytes slurped from the header
1005 try writer.writeAll(font_dir_header_buf[0..populated_len]);
1006 // Then write the rest of the bytes and the padding
1007 try writeResourceDataNoPadding(writer, &file_reader.interface, header.data_size - populated_len);
1008 try writeDataPadding(writer, header.data_size);
1009
1010 try self.state.font_dir.add(self.arena, FontDir.Font{
1011 .id = header.name_value.ordinal,
1012 .header_bytes = font_dir_header_buf,
1013 }, node.id);
1014 return;
1015 },
1016 .ACCELERATOR, // Cannot use an external file, enforced by the parser
1017 .DIALOG, // Cannot use an external file, enforced by the parser
1018 .DLGINCLUDE, // Handled specially above
1019 .MENU, // Cannot use an external file, enforced by the parser
1020 .STRING, // Parser error if this resource is specified as a number
1021 .TOOLBAR, // Cannot use an external file, enforced by the parser
1022 .VERSION, // Cannot use an external file, enforced by the parser
1023 => unreachable,
1024 _ => unreachable,
1025 }
1026 } else {
1027 header.applyMemoryFlags(node.common_resource_attributes, self.source);
1028 }
1029
1030 // Fallback to just writing out the entire contents of the file
1031 const data_size = try file_reader.getSize();
1032 if (data_size > std.math.maxInt(u32)) {
1033 return self.addErrorDetailsAndFail(.{
1034 .err = .resource_data_size_exceeds_max,
1035 .token = node.id,
1036 });
1037 }
1038 // We now know that the data size will fit in a u32
1039 header.data_size = @intCast(data_size);
1040 try header.write(writer, self.errContext(node.id));
1041 try writeResourceData(writer, &file_reader.interface, header.data_size);
1042 }
1043
1044 fn iconReadError(
1045 self: *Compiler,
1046 err: ico.ReadError,
1047 filename: []const u8,
1048 token: Token,
1049 predefined_type: res.RT,
1050 ) error{ CompileError, OutOfMemory } {
1051 const filename_string_index = try self.diagnostics.putString(filename);
1052 return self.addErrorDetailsAndFail(.{
1053 .err = .icon_read_error,
1054 .token = token,
1055 .extra = .{ .icon_read_error = .{
1056 .err = ErrorDetails.IconReadError.enumFromError(err),
1057 .icon_type = switch (predefined_type) {
1058 .GROUP_ICON => .icon,
1059 .GROUP_CURSOR => .cursor,
1060 else => unreachable,
1061 },
1062 .filename_string_index = filename_string_index,
1063 } },
1064 });
1065 }
1066
1067 pub const DataType = enum {
1068 number,
1069 ascii_string,
1070 wide_string,
1071 };
1072
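 /// A single evaluated raw data item: numbers are written as a little-endian WORD
 /// (or DWORD when `number.is_long` is set), and strings are written as their raw bytes.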
1073 pub const Data = union(DataType) {
1074 number: Number,
1075 ascii_string: []const u8,
1076 wide_string: [:0]const u16,
1077
1078 pub fn deinit(self: Data, allocator: Allocator) void {
1079 switch (self) {
1080 .wide_string => |wide_string| {
1081 allocator.free(wide_string);
1082 },
1083 .ascii_string => |ascii_string| {
1084 allocator.free(ascii_string);
1085 },
1086 else => {},
1087 }
1088 }
1089
1090 pub fn write(self: Data, writer: *std.Io.Writer) !void {
1091 switch (self) {
1092 .number => |number| switch (number.is_long) {
1093 false => try writer.writeInt(WORD, number.asWord(), .little),
1094 true => try writer.writeInt(DWORD, number.value, .little),
1095 },
1096 .ascii_string => |ascii_string| {
1097 try writer.writeAll(ascii_string);
1098 },
1099 .wide_string => |wide_string| {
1100 try writer.writeAll(std.mem.sliceAsBytes(wide_string));
1101 },
1102 }
1103 }
1104 };
1105
1106 /// Assumes that the node is a number or number expression
1107 pub fn evaluateNumberExpression(expression_node: *Node, source: []const u8, code_page_lookup: *const CodePageLookup) Number {
1108 switch (expression_node.id) {
1109 .literal => {
1110 const literal_node = expression_node.cast(.literal).?;
1111 std.debug.assert(literal_node.token.id == .number);
1112 const bytes = SourceBytes{
1113 .slice = literal_node.token.slice(source),
1114 .code_page = code_page_lookup.getForToken(literal_node.token),
1115 };
1116 return literals.parseNumberLiteral(bytes);
1117 },
1118 .binary_expression => {
1119 const binary_expression_node = expression_node.cast(.binary_expression).?;
1120 const lhs = evaluateNumberExpression(binary_expression_node.left, source, code_page_lookup);
1121 const rhs = evaluateNumberExpression(binary_expression_node.right, source, code_page_lookup);
1122 const operator_char = binary_expression_node.operator.slice(source)[0];
1123 return lhs.evaluateOperator(operator_char, rhs);
1124 },
1125 .grouped_expression => {
1126 const grouped_expression_node = expression_node.cast(.grouped_expression).?;
1127 return evaluateNumberExpression(grouped_expression_node.expression, source, code_page_lookup);
1128 },
1129 else => unreachable,
1130 }
1131 }
1132
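 // Helper for evaluating flags expressions that may contain NOT terms: `value` holds
 // the accumulated flags, while `not_mask` records which bits NOT terms should clear
 // (applied via `applyNotMask`).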
1133 const FlagsNumber = struct {
1134 value: u32,
1135 not_mask: u32 = 0xFFFFFFFF,
1136
1137 pub fn evaluateOperator(lhs: FlagsNumber, operator_char: u8, rhs: FlagsNumber) FlagsNumber {
1138 const result = switch (operator_char) {
1139 '-' => lhs.value -% rhs.value,
1140 '+' => lhs.value +% rhs.value,
1141 '|' => lhs.value | rhs.value,
1142 '&' => lhs.value & rhs.value,
1143 else => unreachable, // invalid operator, this would be a lexer/parser bug
1144 };
1145 return .{
1146 .value = result,
1147 .not_mask = lhs.not_mask & rhs.not_mask,
1148 };
1149 }
1150
1151 pub fn applyNotMask(self: FlagsNumber) u32 {
1152 return self.value & self.not_mask;
1153 }
1154 };
1155
1156 pub fn evaluateFlagsExpressionWithDefault(default: u32, expression_node: *Node, source: []const u8, code_page_lookup: *const CodePageLookup) u32 {
1157 var context = FlagsExpressionContext{ .initial_value = default };
1158 const number = evaluateFlagsExpression(expression_node, source, code_page_lookup, &context);
1159 return number.value;
1160 }
1161
1162 pub const FlagsExpressionContext = struct {
1163 initial_value: u32 = 0,
1164 initial_value_used: bool = false,
1165 };
1166
1167 /// Assumes that the node is a number expression (which can contain not_expressions)
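 /// The context's `initial_value` (e.g. a default style) is merged into the first
 /// literal or NOT term that is encountered and is otherwise left out of the result.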
1168 pub fn evaluateFlagsExpression(expression_node: *Node, source: []const u8, code_page_lookup: *const CodePageLookup, context: *FlagsExpressionContext) FlagsNumber {
1169 switch (expression_node.id) {
1170 .literal => {
1171 const literal_node = expression_node.cast(.literal).?;
1172 std.debug.assert(literal_node.token.id == .number);
1173 const bytes = SourceBytes{
1174 .slice = literal_node.token.slice(source),
1175 .code_page = code_page_lookup.getForToken(literal_node.token),
1176 };
1177 var value = literals.parseNumberLiteral(bytes).value;
1178 if (!context.initial_value_used) {
1179 context.initial_value_used = true;
1180 value |= context.initial_value;
1181 }
1182 return .{ .value = value };
1183 },
1184 .binary_expression => {
1185 const binary_expression_node = expression_node.cast(.binary_expression).?;
1186 const lhs = evaluateFlagsExpression(binary_expression_node.left, source, code_page_lookup, context);
1187 const rhs = evaluateFlagsExpression(binary_expression_node.right, source, code_page_lookup, context);
1188 const operator_char = binary_expression_node.operator.slice(source)[0];
1189 const result = lhs.evaluateOperator(operator_char, rhs);
1190 return .{ .value = result.applyNotMask() };
1191 },
1192 .grouped_expression => {
1193 const grouped_expression_node = expression_node.cast(.grouped_expression).?;
1194 return evaluateFlagsExpression(grouped_expression_node.expression, source, code_page_lookup, context);
1195 },
1196 .not_expression => {
1197 const not_expression = expression_node.cast(.not_expression).?;
1198 const bytes = SourceBytes{
1199 .slice = not_expression.number_token.slice(source),
1200 .code_page = code_page_lookup.getForToken(not_expression.number_token),
1201 };
1202 const not_number = literals.parseNumberLiteral(bytes);
1203 if (!context.initial_value_used) {
1204 context.initial_value_used = true;
1205 return .{ .value = context.initial_value & ~not_number.value };
1206 }
1207 return .{ .value = 0, .not_mask = ~not_number.value };
1208 },
1209 else => unreachable,
1210 }
1211 }
1212
1213 pub fn evaluateDataExpression(self: *Compiler, expression_node: *Node) !Data {
1214 switch (expression_node.id) {
1215 .literal => {
1216 const literal_node = expression_node.cast(.literal).?;
1217 switch (literal_node.token.id) {
1218 .number => {
1219 const number = evaluateNumberExpression(expression_node, self.source, self.input_code_pages);
1220 return .{ .number = number };
1221 },
1222 .quoted_ascii_string => {
1223 const column = literal_node.token.calculateColumn(self.source, 8, null);
1224 const bytes = SourceBytes{
1225 .slice = literal_node.token.slice(self.source),
1226 .code_page = self.input_code_pages.getForToken(literal_node.token),
1227 };
1228 const parsed = try literals.parseQuotedAsciiString(self.allocator, bytes, .{
1229 .start_column = column,
1230 .diagnostics = self.errContext(literal_node.token),
1231 .output_code_page = self.output_code_pages.getForToken(literal_node.token),
1232 });
1233 errdefer self.allocator.free(parsed);
1234 return .{ .ascii_string = parsed };
1235 },
1236 .quoted_wide_string => {
1237 const column = literal_node.token.calculateColumn(self.source, 8, null);
1238 const bytes = SourceBytes{
1239 .slice = literal_node.token.slice(self.source),
1240 .code_page = self.input_code_pages.getForToken(literal_node.token),
1241 };
1242 const parsed_string = try literals.parseQuotedWideString(self.allocator, bytes, .{
1243 .start_column = column,
1244 .diagnostics = self.errContext(literal_node.token),
1245 .output_code_page = self.output_code_pages.getForToken(literal_node.token),
1246 });
1247 errdefer self.allocator.free(parsed_string);
1248 return .{ .wide_string = parsed_string };
1249 },
1250 else => unreachable, // no other token types should be in a data literal node
1251 }
1252 },
1253 .binary_expression, .grouped_expression => {
1254 const result = evaluateNumberExpression(expression_node, self.source, self.input_code_pages);
1255 return .{ .number = result };
1256 },
1257 .not_expression => unreachable,
1258 else => unreachable,
1259 }
1260 }
1261
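 // Raw data expressions are evaluated into an in-memory buffer first so that the
 // total data size is known before the resource header is written.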
1262 pub fn writeResourceRawData(self: *Compiler, node: *Node.ResourceRawData, writer: *std.Io.Writer) !void {
1263 var data_buffer: std.Io.Writer.Allocating = .init(self.allocator);
1264 defer data_buffer.deinit();
1265
1266 for (node.raw_data) |expression| {
1267 const data = try self.evaluateDataExpression(expression);
1268 defer data.deinit(self.allocator);
1269 try data.write(&data_buffer.writer);
1270 }
1271
1272 // TODO: Limit data_buffer in some way to error when writing more than u32 max bytes
1273 const data_len: u32 = std.math.cast(u32, data_buffer.written().len) orelse {
1274 return self.addErrorDetailsAndFail(.{
1275 .err = .resource_data_size_exceeds_max,
1276 .token = node.id,
1277 });
1278 };
1279 try self.writeResourceHeader(writer, node.id, node.type, data_len, node.common_resource_attributes, self.state.language);
1280
1281 var data_fbs: std.Io.Reader = .fixed(data_buffer.written());
1282 try writeResourceData(writer, &data_fbs, data_len);
1283 }
1284
1285 pub fn writeResourceHeader(self: *Compiler, writer: *std.Io.Writer, id_token: Token, type_token: Token, data_size: u32, common_resource_attributes: []Token, language: res.Language) !void {
1286 var header = try self.resourceHeader(id_token, type_token, .{
1287 .language = language,
1288 .data_size = data_size,
1289 });
1290 defer header.deinit(self.allocator);
1291
1292 header.applyMemoryFlags(common_resource_attributes, self.source);
1293
1294 try header.write(writer, self.errContext(id_token));
1295 }
1296
1297 pub fn writeResourceDataNoPadding(writer: *std.Io.Writer, data_reader: *std.Io.Reader, data_size: u32) !void {
1298 try data_reader.streamExact(writer, data_size);
1299 }
1300
1301 pub fn writeResourceData(writer: *std.Io.Writer, data_reader: *std.Io.Reader, data_size: u32) !void {
1302 try writeResourceDataNoPadding(writer, data_reader, data_size);
1303 try writeDataPadding(writer, data_size);
1304 }
1305
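 /// Resource data in a .res file is padded to a 4-byte boundary; for example, a
 /// data size of 6 needs 2 padding bytes while a data size of 8 needs none.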
1306 pub fn writeDataPadding(writer: *std.Io.Writer, data_size: u32) !void {
1307 try writer.splatByteAll(0, numPaddingBytesNeeded(data_size));
1308 }
1309
1310 pub fn numPaddingBytesNeeded(data_size: u32) u2 {
1311 // Result is guaranteed to be between 0 and 3.
1312 return @intCast((4 -% data_size) % 4);
1313 }
1314
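 /// Evaluates an accelerator event to its u16 key code: number expressions are used
 /// as-is, while string literals (e.g. "^C") are parsed according to whether the
 /// VIRTKEY flag is set.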
1315 pub fn evaluateAcceleratorKeyExpression(self: *Compiler, node: *Node, is_virt: bool) !u16 {
1316 if (node.isNumberExpression()) {
1317 return evaluateNumberExpression(node, self.source, self.input_code_pages).asWord();
1318 } else {
1319 std.debug.assert(node.isStringLiteral());
1320 const literal: *Node.Literal = @alignCast(@fieldParentPtr("base", node));
1321 const bytes = SourceBytes{
1322 .slice = literal.token.slice(self.source),
1323 .code_page = self.input_code_pages.getForToken(literal.token),
1324 };
1325 const column = literal.token.calculateColumn(self.source, 8, null);
1326 return res.parseAcceleratorKeyString(bytes, is_virt, .{
1327 .start_column = column,
1328 .diagnostics = self.errContext(literal.token),
1329 .output_code_page = self.output_code_pages.getForToken(literal.token),
1330 });
1331 }
1332 }
1333
1334 pub fn writeAccelerators(self: *Compiler, node: *Node.Accelerators, writer: *std.Io.Writer) !void {
1335 var data_buffer: std.Io.Writer.Allocating = .init(self.allocator);
1336 defer data_buffer.deinit();
1337
1338 try self.writeAcceleratorsData(node, &data_buffer.writer);
1339
1340 // TODO: Limit data_buffer in some way to error when writing more than u32 max bytes
1341 const data_size: u32 = std.math.cast(u32, data_buffer.written().len) orelse {
1342 return self.addErrorDetailsAndFail(.{
1343 .err = .resource_data_size_exceeds_max,
1344 .token = node.id,
1345 });
1346 };
1347 var header = try self.resourceHeader(node.id, node.type, .{
1348 .data_size = data_size,
1349 });
1350 defer header.deinit(self.allocator);
1351
1352 header.applyMemoryFlags(node.common_resource_attributes, self.source);
1353 header.applyOptionalStatements(node.optional_statements, self.source, self.input_code_pages);
1354
1355 try header.write(writer, self.errContext(node.id));
1356
1357 var data_fbs: std.Io.Reader = .fixed(data_buffer.written());
1358 try writeResourceData(writer, &data_fbs, data_size);
1359 }
1360
1361 /// Expects `data_writer` to be a LimitedWriter limited to u32, meaning all writes to
1362 /// the writer within this function could return error.NoSpaceLeft
1363 pub fn writeAcceleratorsData(self: *Compiler, node: *Node.Accelerators, data_writer: *std.Io.Writer) !void {
1364 for (node.accelerators, 0..) |accel_node, i| {
1365 const accelerator: *Node.Accelerator = @alignCast(@fieldParentPtr("base", accel_node));
1366 var modifiers = res.AcceleratorModifiers{};
1367 for (accelerator.type_and_options) |type_or_option| {
1368 const modifier = rc.AcceleratorTypeAndOptions.map.get(type_or_option.slice(self.source)).?;
1369 modifiers.apply(modifier);
1370 }
1371 if ((modifiers.isSet(.control) or modifiers.isSet(.shift)) and !modifiers.isSet(.virtkey)) {
1372 try self.addErrorDetails(.{
1373 .err = .accelerator_shift_or_control_without_virtkey,
1374 .type = .warning,
1375 // We know that one of SHIFT or CONTROL was specified, so there's at least one item
1376 // in this list.
1377 .token = accelerator.type_and_options[0],
1378 .token_span_end = accelerator.type_and_options[accelerator.type_and_options.len - 1],
1379 });
1380 }
1381 if (accelerator.event.isNumberExpression() and !modifiers.explicit_ascii_or_virtkey) {
1382 return self.addErrorDetailsAndFail(.{
1383 .err = .accelerator_type_required,
1384 .token = accelerator.event.getFirstToken(),
1385 .token_span_end = accelerator.event.getLastToken(),
1386 });
1387 }
1388 const key = self.evaluateAcceleratorKeyExpression(accelerator.event, modifiers.isSet(.virtkey)) catch |err| switch (err) {
1389 error.OutOfMemory => |e| return e,
1390 else => |e| {
1391 return self.addErrorDetailsAndFail(.{
1392 .err = .invalid_accelerator_key,
1393 .token = accelerator.event.getFirstToken(),
1394 .token_span_end = accelerator.event.getLastToken(),
1395 .extra = .{ .accelerator_error = .{
1396 .err = ErrorDetails.AcceleratorError.enumFromError(e),
1397 } },
1398 });
1399 },
1400 };
1401 const cmd_id = evaluateNumberExpression(accelerator.idvalue, self.source, self.input_code_pages);
1402
1403 if (i == node.accelerators.len - 1) {
1404 modifiers.markLast();
1405 }
1406
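            // Each accelerator entry is 8 bytes on disk: the modifier flags byte plus a
            // padding byte (together a little-endian u16), then the key as a u16, the
            // command ID as a u16, and a final u16 of padding.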
1407 try data_writer.writeByte(modifiers.value);
1408 try data_writer.writeByte(0); // padding
1409 try data_writer.writeInt(u16, key, .little);
1410 try data_writer.writeInt(u16, cmd_id.asWord(), .little);
1411 try data_writer.writeInt(u16, 0, .little); // padding
1412 }
1413 }
1414
1415 const DialogOptionalStatementValues = struct {
1416 style: u32 = res.WS.SYSMENU | res.WS.BORDER | res.WS.POPUP,
1417 exstyle: u32 = 0,
1418 class: ?NameOrOrdinal = null,
1419 menu: ?NameOrOrdinal = null,
1420 font: ?FontStatementValues = null,
1421 caption: ?Token = null,
1422 };
1423
1424 pub fn writeDialog(self: *Compiler, node: *Node.Dialog, writer: *std.Io.Writer) !void {
1425 var data_buffer: std.Io.Writer.Allocating = .init(self.allocator);
1426 defer data_buffer.deinit();
1427
1428 const resource = ResourceType.fromString(.{
1429 .slice = node.type.slice(self.source),
1430 .code_page = self.input_code_pages.getForToken(node.type),
1431 });
1432 std.debug.assert(resource == .dialog or resource == .dialogex);
1433
1434 var optional_statement_values: DialogOptionalStatementValues = .{};
1435 defer {
1436 if (optional_statement_values.class) |class| {
1437 class.deinit(self.allocator);
1438 }
1439 if (optional_statement_values.menu) |menu| {
1440 menu.deinit(self.allocator);
1441 }
1442 }
1443 var last_menu: *Node.SimpleStatement = undefined;
1444 var last_class: *Node.SimpleStatement = undefined;
1445 var last_menu_would_be_forced_ordinal = false;
1446 var last_menu_has_digit_as_first_char = false;
1447 var last_menu_did_uppercase = false;
1448 var last_class_would_be_forced_ordinal = false;
1449
1450 for (node.optional_statements) |optional_statement| {
1451 switch (optional_statement.id) {
1452 .simple_statement => {
1453 const simple_statement: *Node.SimpleStatement = @alignCast(@fieldParentPtr("base", optional_statement));
1454 const statement_identifier = simple_statement.identifier;
1455 const statement_type = rc.OptionalStatements.dialog_map.get(statement_identifier.slice(self.source)) orelse continue;
1456 switch (statement_type) {
1457 .style, .exstyle => {
1458 const style = evaluateFlagsExpressionWithDefault(0, simple_statement.value, self.source, self.input_code_pages);
1459 if (statement_type == .style) {
1460 optional_statement_values.style = style;
1461 } else {
1462 optional_statement_values.exstyle = style;
1463 }
1464 },
1465 .caption => {
1466 std.debug.assert(simple_statement.value.id == .literal);
1467 const literal_node: *Node.Literal = @alignCast(@fieldParentPtr("base", simple_statement.value));
1468 optional_statement_values.caption = literal_node.token;
1469 },
1470 .class => {
1471 const is_duplicate = optional_statement_values.class != null;
1472 const forced_ordinal = is_duplicate and optional_statement_values.class.? == .ordinal;
1473                             // In the Win32 RC compiler, if any CLASS value is interpreted as
1474                             // an ordinal, it affects all future CLASS statements and forces
1475                             // them to be treated as an ordinal no matter what.
1476 if (forced_ordinal) {
1477 last_class_would_be_forced_ordinal = true;
1478 }
1479 // clear out the old one if it exists
1480 if (optional_statement_values.class) |prev| {
1481 prev.deinit(self.allocator);
1482 optional_statement_values.class = null;
1483 }
1484
1485 if (simple_statement.value.isNumberExpression()) {
1486 const class_ordinal = evaluateNumberExpression(simple_statement.value, self.source, self.input_code_pages);
1487 optional_statement_values.class = NameOrOrdinal{ .ordinal = class_ordinal.asWord() };
1488 } else {
1489 std.debug.assert(simple_statement.value.isStringLiteral());
1490 const literal_node: *Node.Literal = @alignCast(@fieldParentPtr("base", simple_statement.value));
1491 const parsed = try self.parseQuotedStringAsWideString(literal_node.token);
1492 optional_statement_values.class = NameOrOrdinal{ .name = parsed };
1493 }
1494
1495 last_class = simple_statement;
1496 },
1497 .menu => {
1498 const is_duplicate = optional_statement_values.menu != null;
1499 const forced_ordinal = is_duplicate and optional_statement_values.menu.? == .ordinal;
1500                             // In the Win32 RC compiler, if any MENU value is interpreted as
1501                             // an ordinal, it affects all future MENU statements and forces
1502                             // them to be treated as an ordinal no matter what.
1503 if (forced_ordinal) {
1504 last_menu_would_be_forced_ordinal = true;
1505 }
1506 // clear out the old one if it exists
1507 if (optional_statement_values.menu) |prev| {
1508 prev.deinit(self.allocator);
1509 optional_statement_values.menu = null;
1510 }
1511
1512 std.debug.assert(simple_statement.value.id == .literal);
1513 const literal_node: *Node.Literal = @alignCast(@fieldParentPtr("base", simple_statement.value));
1514
1515 const token_slice = literal_node.token.slice(self.source);
1516 const bytes = SourceBytes{
1517 .slice = token_slice,
1518 .code_page = self.input_code_pages.getForToken(literal_node.token),
1519 };
1520 optional_statement_values.menu = try NameOrOrdinal.fromString(self.allocator, bytes);
1521
1522 if (optional_statement_values.menu.? == .name) {
1523 if (NameOrOrdinal.maybeNonAsciiOrdinalFromString(bytes)) |win32_rc_ordinal| {
1524 try self.addErrorDetails(.{
1525 .err = .invalid_digit_character_in_ordinal,
1526 .type = .err,
1527 .token = literal_node.token,
1528 });
1529 return self.addErrorDetailsAndFail(.{
1530 .err = .win32_non_ascii_ordinal,
1531 .type = .note,
1532 .token = literal_node.token,
1533 .print_source_line = false,
1534 .extra = .{ .number = win32_rc_ordinal.ordinal },
1535 });
1536 }
1537 }
1538
1539 // Need to keep track of some properties of the value
1540 // in order to emit the appropriate warning(s) later on.
1541                         // See where the warnings are emitted below (outside this loop)
1542 // for the full explanation.
1543 var did_uppercase = false;
1544 var codepoint_i: usize = 0;
1545 while (bytes.code_page.codepointAt(codepoint_i, bytes.slice)) |codepoint| : (codepoint_i += codepoint.byte_len) {
1546 const c = codepoint.value;
1547 switch (c) {
1548 'a'...'z' => {
1549 did_uppercase = true;
1550 break;
1551 },
1552 else => {},
1553 }
1554 }
1555 last_menu_did_uppercase = did_uppercase;
1556 last_menu_has_digit_as_first_char = std.ascii.isDigit(token_slice[0]);
1557 last_menu = simple_statement;
1558 },
1559 else => {},
1560 }
1561 },
1562 .font_statement => {
1563 const font: *Node.FontStatement = @alignCast(@fieldParentPtr("base", optional_statement));
1564 if (optional_statement_values.font != null) {
1565 optional_statement_values.font.?.node = font;
1566 } else {
1567 optional_statement_values.font = FontStatementValues{ .node = font };
1568 }
1569 if (font.weight) |weight| {
1570 const value = evaluateNumberExpression(weight, self.source, self.input_code_pages);
1571 optional_statement_values.font.?.weight = value.asWord();
1572 }
1573 if (font.italic) |italic| {
1574 const value = evaluateNumberExpression(italic, self.source, self.input_code_pages);
1575 optional_statement_values.font.?.italic = value.asWord() != 0;
1576 }
1577 },
1578 else => {},
1579 }
1580 }
1581
1582 // The Win32 RC compiler miscompiles the value in the following scenario:
1583         // If multiple CLASS parameters are specified and any of them are treated as a number,
1584         // then the last CLASS is always treated as a number no matter what.
1585 if (last_class_would_be_forced_ordinal and optional_statement_values.class.? == .name) {
1586 const literal_node: *Node.Literal = @alignCast(@fieldParentPtr("base", last_class.value));
1587 const ordinal_value = res.ForcedOrdinal.fromUtf16Le(optional_statement_values.class.?.name);
1588
1589 try self.addErrorDetails(.{
1590 .err = .rc_would_miscompile_dialog_class,
1591 .type = .warning,
1592 .token = literal_node.token,
1593 .extra = .{ .number = ordinal_value },
1594 });
1595 try self.addErrorDetails(.{
1596 .err = .rc_would_miscompile_dialog_class,
1597 .type = .note,
1598 .print_source_line = false,
1599 .token = literal_node.token,
1600 .extra = .{ .number = ordinal_value },
1601 });
1602 try self.addErrorDetails(.{
1603 .err = .rc_would_miscompile_dialog_menu_or_class_id_forced_ordinal,
1604 .type = .note,
1605 .print_source_line = false,
1606 .token = literal_node.token,
1607 .extra = .{ .menu_or_class = .class },
1608 });
1609 }
1610 // The Win32 RC compiler miscompiles the id in two different scenarios:
1611 // 1. The first character of the ID is a digit, in which case it is always treated as a number
1612 // no matter what (and therefore does not match how the MENU/MENUEX id is parsed)
1613         // 2. If multiple MENU parameters are specified and any of them are treated as a number, then
1614 // the last MENU is always treated as a number no matter what
1615 if ((last_menu_would_be_forced_ordinal or last_menu_has_digit_as_first_char) and optional_statement_values.menu.? == .name) {
1616 const literal_node: *Node.Literal = @alignCast(@fieldParentPtr("base", last_menu.value));
1617 const token_slice = literal_node.token.slice(self.source);
1618 const bytes = SourceBytes{
1619 .slice = token_slice,
1620 .code_page = self.input_code_pages.getForToken(literal_node.token),
1621 };
1622 const ordinal_value = res.ForcedOrdinal.fromBytes(bytes);
1623
1624 try self.addErrorDetails(.{
1625 .err = .rc_would_miscompile_dialog_menu_id,
1626 .type = .warning,
1627 .token = literal_node.token,
1628 .extra = .{ .number = ordinal_value },
1629 });
1630 try self.addErrorDetails(.{
1631 .err = .rc_would_miscompile_dialog_menu_id,
1632 .type = .note,
1633 .print_source_line = false,
1634 .token = literal_node.token,
1635 .extra = .{ .number = ordinal_value },
1636 });
1637 if (last_menu_would_be_forced_ordinal) {
1638 try self.addErrorDetails(.{
1639 .err = .rc_would_miscompile_dialog_menu_or_class_id_forced_ordinal,
1640 .type = .note,
1641 .print_source_line = false,
1642 .token = literal_node.token,
1643 .extra = .{ .menu_or_class = .menu },
1644 });
1645 } else {
1646 try self.addErrorDetails(.{
1647 .err = .rc_would_miscompile_dialog_menu_id_starts_with_digit,
1648 .type = .note,
1649 .print_source_line = false,
1650 .token = literal_node.token,
1651 });
1652 }
1653 }
1654 // The MENU id parsing uses the exact same logic as the MENU/MENUEX resource id parsing,
1655 // which means that it will convert ASCII characters to uppercase during the 'name' parsing.
1656 // This turns out not to matter (`LoadMenu` does a case-insensitive lookup anyway),
1657 // but it still makes sense to share the uppercasing logic since the MENU parameter
1658 // here is just a reference to a MENU/MENUEX id within the .exe.
1659 // So, because this is an intentional but inconsequential-to-the-user difference
1660 // between resinator and the Win32 RC compiler, we only emit a hint instead of
1661 // a warning.
1662 if (last_menu_did_uppercase) {
1663 const literal_node: *Node.Literal = @alignCast(@fieldParentPtr("base", last_menu.value));
1664 try self.addErrorDetails(.{
1665 .err = .dialog_menu_id_was_uppercased,
1666 .type = .hint,
1667 .token = literal_node.token,
1668 });
1669 }
1670
1671 const x = evaluateNumberExpression(node.x, self.source, self.input_code_pages);
1672 const y = evaluateNumberExpression(node.y, self.source, self.input_code_pages);
1673 const width = evaluateNumberExpression(node.width, self.source, self.input_code_pages);
1674 const height = evaluateNumberExpression(node.height, self.source, self.input_code_pages);
1675
1676         // FONT statement requires DS_SETFONT, and if it's not present DS_SETFONT must be unset
1677 if (optional_statement_values.font) |_| {
1678 optional_statement_values.style |= res.DS.SETFONT;
1679 } else {
1680 optional_statement_values.style &= ~res.DS.SETFONT;
1681 }
1682 // CAPTION statement implies WS_CAPTION
1683 if (optional_statement_values.caption) |_| {
1684 optional_statement_values.style |= res.WS.CAPTION;
1685 }
1686
1687 // NOTE: Dialog header and menu/class/title strings can never exceed u32 bytes
1688 // on their own.
1689 try self.writeDialogHeaderAndStrings(
1690 node,
1691 &data_buffer.writer,
1692 resource,
1693 &optional_statement_values,
1694 x,
1695 y,
1696 width,
1697 height,
1698 );
1699
1700 var controls_by_id = std.AutoHashMap(u32, *const Node.ControlStatement).init(self.allocator);
1701         // The number of controls is guaranteed by the parser to be within maxInt(u16).
1702 try controls_by_id.ensureTotalCapacity(@as(u16, @intCast(node.controls.len)));
1703 defer controls_by_id.deinit();
1704
1705 for (node.controls) |control_node| {
1706 const control: *Node.ControlStatement = @alignCast(@fieldParentPtr("base", control_node));
1707
1708 try self.writeDialogControl(
1709 control,
1710 &data_buffer.writer,
1711 resource,
1712 // We know the data_buffer len is limited to u32 max.
1713 @intCast(data_buffer.written().len),
1714 &controls_by_id,
1715 );
1716
1717 if (data_buffer.written().len > std.math.maxInt(u32)) {
1718 try self.addErrorDetails(.{
1719 .err = .resource_data_size_exceeds_max,
1720 .token = node.id,
1721 });
1722 return self.addErrorDetailsAndFail(.{
1723 .err = .resource_data_size_exceeds_max,
1724 .type = .note,
1725 .token = control.type,
1726 });
1727 }
1728 }
1729
1730 // We know the data_buffer len is limited to u32 max.
1731 const data_size: u32 = @intCast(data_buffer.written().len);
1732 var header = try self.resourceHeader(node.id, node.type, .{
1733 .data_size = data_size,
1734 });
1735 defer header.deinit(self.allocator);
1736
1737 header.applyMemoryFlags(node.common_resource_attributes, self.source);
1738 header.applyOptionalStatements(node.optional_statements, self.source, self.input_code_pages);
1739
1740 try header.write(writer, self.errContext(node.id));
1741
1742 var data_fbs: std.Io.Reader = .fixed(data_buffer.written());
1743 try writeResourceData(writer, &data_fbs, data_size);
1744 }
1745
1746 fn writeDialogHeaderAndStrings(
1747 self: *Compiler,
1748 node: *Node.Dialog,
1749 data_writer: *std.Io.Writer,
1750 resource: ResourceType,
1751 optional_statement_values: *const DialogOptionalStatementValues,
1752 x: Number,
1753 y: Number,
1754 width: Number,
1755 height: Number,
1756 ) !void {
1757 // Header
1758 if (resource == .dialogex) {
1759 const help_id: u32 = help_id: {
1760 if (node.help_id == null) break :help_id 0;
1761 break :help_id evaluateNumberExpression(node.help_id.?, self.source, self.input_code_pages).value;
1762 };
1763 try data_writer.writeInt(u16, 1, .little); // version number, always 1
1764 try data_writer.writeInt(u16, 0xFFFF, .little); // signature, always 0xFFFF
1765 try data_writer.writeInt(u32, help_id, .little);
1766 try data_writer.writeInt(u32, optional_statement_values.exstyle, .little);
1767 try data_writer.writeInt(u32, optional_statement_values.style, .little);
1768 } else {
1769 try data_writer.writeInt(u32, optional_statement_values.style, .little);
1770 try data_writer.writeInt(u32, optional_statement_values.exstyle, .little);
1771 }
1772 // This limit is enforced by the parser, so we know the number of controls
1773 // is within the range of a u16.
1774 try data_writer.writeInt(u16, @as(u16, @intCast(node.controls.len)), .little);
1775 try data_writer.writeInt(u16, x.asWord(), .little);
1776 try data_writer.writeInt(u16, y.asWord(), .little);
1777 try data_writer.writeInt(u16, width.asWord(), .little);
1778 try data_writer.writeInt(u16, height.asWord(), .little);
1779
1780 // Menu
1781 if (optional_statement_values.menu) |menu| {
1782 try menu.write(data_writer);
1783 } else {
1784 try data_writer.writeInt(u16, 0, .little);
1785 }
1786 // Class
1787 if (optional_statement_values.class) |class| {
1788 try class.write(data_writer);
1789 } else {
1790 try data_writer.writeInt(u16, 0, .little);
1791 }
1792 // Caption
1793 if (optional_statement_values.caption) |caption| {
1794 const parsed = try self.parseQuotedStringAsWideString(caption);
1795 defer self.allocator.free(parsed);
1796 try data_writer.writeAll(std.mem.sliceAsBytes(parsed[0 .. parsed.len + 1]));
1797 } else {
1798 try data_writer.writeInt(u16, 0, .little);
1799 }
1800 // Font
1801 if (optional_statement_values.font) |font| {
1802 try self.writeDialogFont(resource, font, data_writer);
1803 }
1804 }
1805
1806 fn writeDialogControl(
1807 self: *Compiler,
1808 control: *Node.ControlStatement,
1809 data_writer: *std.Io.Writer,
1810 resource: ResourceType,
1811 bytes_written_so_far: u32,
1812 controls_by_id: *std.AutoHashMap(u32, *const Node.ControlStatement),
1813 ) !void {
1814 const control_type = rc.Control.map.get(control.type.slice(self.source)).?;
1815
1816 // Each control must be at a 4-byte boundary. However, the Windows RC
1817 // compiler will miscompile controls if their extra data ends on an odd offset.
1818 // We will avoid the miscompilation and emit a warning.
1819 const num_padding = numPaddingBytesNeeded(bytes_written_so_far);
1820 if (num_padding == 1 or num_padding == 3) {
1821 try self.addErrorDetails(.{
1822 .err = .rc_would_miscompile_control_padding,
1823 .type = .warning,
1824 .token = control.type,
1825 });
1826 try self.addErrorDetails(.{
1827 .err = .rc_would_miscompile_control_padding,
1828 .type = .note,
1829 .print_source_line = false,
1830 .token = control.type,
1831 });
1832 }
1833 try data_writer.splatByteAll(0, num_padding);
1834
1835 const style = if (control.style) |style_expression|
1836 // Certain styles are implied by the control type
1837 evaluateFlagsExpressionWithDefault(res.ControlClass.getImpliedStyle(control_type), style_expression, self.source, self.input_code_pages)
1838 else
1839 res.ControlClass.getImpliedStyle(control_type);
1840
1841 const exstyle = if (control.exstyle) |exstyle_expression|
1842 evaluateFlagsExpressionWithDefault(0, exstyle_expression, self.source, self.input_code_pages)
1843 else
1844 0;
1845
1846 switch (resource) {
1847 .dialog => {
1848 // Note: Reverse order from DIALOGEX
1849 try data_writer.writeInt(u32, style, .little);
1850 try data_writer.writeInt(u32, exstyle, .little);
1851 },
1852 .dialogex => {
1853 const help_id: u32 = if (control.help_id) |help_id_expression|
1854 evaluateNumberExpression(help_id_expression, self.source, self.input_code_pages).value
1855 else
1856 0;
1857 try data_writer.writeInt(u32, help_id, .little);
1858 // Note: Reverse order from DIALOG
1859 try data_writer.writeInt(u32, exstyle, .little);
1860 try data_writer.writeInt(u32, style, .little);
1861 },
1862 else => unreachable,
1863 }
1864
1865 const control_x = evaluateNumberExpression(control.x, self.source, self.input_code_pages);
1866 const control_y = evaluateNumberExpression(control.y, self.source, self.input_code_pages);
1867 const control_width = evaluateNumberExpression(control.width, self.source, self.input_code_pages);
1868 const control_height = evaluateNumberExpression(control.height, self.source, self.input_code_pages);
1869
1870 try data_writer.writeInt(u16, control_x.asWord(), .little);
1871 try data_writer.writeInt(u16, control_y.asWord(), .little);
1872 try data_writer.writeInt(u16, control_width.asWord(), .little);
1873 try data_writer.writeInt(u16, control_height.asWord(), .little);
1874
1875 const control_id = evaluateNumberExpression(control.id, self.source, self.input_code_pages);
1876 switch (resource) {
1877 .dialog => try data_writer.writeInt(u16, control_id.asWord(), .little),
1878 .dialogex => try data_writer.writeInt(u32, control_id.value, .little),
1879 else => unreachable,
1880 }
1881
1882 const control_id_for_map: u32 = switch (resource) {
1883 .dialog => control_id.asWord(),
1884 .dialogex => control_id.value,
1885 else => unreachable,
1886 };
1887 const result = controls_by_id.getOrPutAssumeCapacity(control_id_for_map);
1888 if (result.found_existing) {
1889 if (!self.silent_duplicate_control_ids) {
1890 try self.addErrorDetails(.{
1891 .err = .control_id_already_defined,
1892 .type = .warning,
1893 .token = control.id.getFirstToken(),
1894 .token_span_end = control.id.getLastToken(),
1895 .extra = .{ .number = control_id_for_map },
1896 });
1897 try self.addErrorDetails(.{
1898 .err = .control_id_already_defined,
1899 .type = .note,
1900 .token = result.value_ptr.*.id.getFirstToken(),
1901 .token_span_end = result.value_ptr.*.id.getLastToken(),
1902 .extra = .{ .number = control_id_for_map },
1903 });
1904 }
1905 } else {
1906 result.value_ptr.* = control;
1907 }
1908
1909 if (res.ControlClass.fromControl(control_type)) |control_class| {
1910 const ordinal = NameOrOrdinal{ .ordinal = @intFromEnum(control_class) };
1911 try ordinal.write(data_writer);
1912 } else {
1913 const class_node = control.class.?;
1914 if (class_node.isNumberExpression()) {
1915 const number = evaluateNumberExpression(class_node, self.source, self.input_code_pages);
1916 const ordinal = NameOrOrdinal{ .ordinal = number.asWord() };
1917                 // This is different from how the Windows RC compiler compiles ordinals here,
1918                 // but I think that's a miscompilation/bug of the Windows implementation.
1919 // The Windows behavior is (where LSB = least significant byte):
1920 // - If the LSB is 0x00 => 0xFFFF0000
1921 // - If the LSB is < 0x80 => 0x000000<LSB>
1922 // - If the LSB is >= 0x80 => 0x0000FF<LSB>
1923 //
1924 // Because of this, we emit a warning about the potential miscompilation
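                // (For example, under that scheme an ordinal whose LSB is 0x80 would be
                // written as 0x0000FF80 and one whose LSB is 0x00 as 0xFFFF0000, whereas
                // we write the full 16-bit ordinal as-is.)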
1925 try self.addErrorDetails(.{
1926 .err = .rc_would_miscompile_control_class_ordinal,
1927 .type = .warning,
1928 .token = class_node.getFirstToken(),
1929 .token_span_end = class_node.getLastToken(),
1930 });
1931 try self.addErrorDetails(.{
1932 .err = .rc_would_miscompile_control_class_ordinal,
1933 .type = .note,
1934 .print_source_line = false,
1935 .token = class_node.getFirstToken(),
1936 .token_span_end = class_node.getLastToken(),
1937 });
1938                 // And then write out the ordinal using a proper NameOrOrdinal encoding.
1939 try ordinal.write(data_writer);
1940 } else if (class_node.isStringLiteral()) {
1941 const literal_node: *Node.Literal = @alignCast(@fieldParentPtr("base", class_node));
1942 const parsed = try self.parseQuotedStringAsWideString(literal_node.token);
1943 defer self.allocator.free(parsed);
1944 if (rc.ControlClass.fromWideString(parsed)) |control_class| {
1945 const ordinal = NameOrOrdinal{ .ordinal = @intFromEnum(control_class) };
1946 try ordinal.write(data_writer);
1947 } else {
1948 // NUL acts as a terminator
1949 // TODO: Maybe warn when parsed_terminated.len != parsed.len, since
1950                     // it seems unlikely that an embedded NUL is intentional
1951 const parsed_terminated = std.mem.sliceTo(parsed, 0);
1952 const name = NameOrOrdinal{ .name = parsed_terminated };
1953 try name.write(data_writer);
1954 }
1955 } else {
1956 const literal_node: *Node.Literal = @alignCast(@fieldParentPtr("base", class_node));
1957 const literal_slice = literal_node.token.slice(self.source);
1958                 // The parser guarantees that this lookup succeeds
1959 const control_class = rc.ControlClass.map.get(literal_slice) orelse unreachable;
1960 const ordinal = NameOrOrdinal{ .ordinal = @intFromEnum(control_class) };
1961 try ordinal.write(data_writer);
1962 }
1963 }
1964
1965 if (control.text) |text_token| {
1966 const bytes = SourceBytes{
1967 .slice = text_token.slice(self.source),
1968 .code_page = self.input_code_pages.getForToken(text_token),
1969 };
1970 if (text_token.isStringLiteral()) {
1971 const text = try self.parseQuotedStringAsWideString(text_token);
1972 defer self.allocator.free(text);
1973 const name = NameOrOrdinal{ .name = text };
1974 try name.write(data_writer);
1975 } else {
1976 std.debug.assert(text_token.id == .number);
1977 const number = literals.parseNumberLiteral(bytes);
1978 const ordinal = NameOrOrdinal{ .ordinal = number.asWord() };
1979 try ordinal.write(data_writer);
1980 }
1981 } else {
1982 try NameOrOrdinal.writeEmpty(data_writer);
1983 }
1984
1985 // The extra data byte length must be able to fit within a u16.
1986 var extra_data_buf: std.Io.Writer.Allocating = .init(self.allocator);
1987 defer extra_data_buf.deinit();
1988 for (control.extra_data) |data_expression| {
1989 const data = try self.evaluateDataExpression(data_expression);
1990 defer data.deinit(self.allocator);
1991 try data.write(&extra_data_buf.writer);
1992
1993 if (extra_data_buf.written().len > std.math.maxInt(u16)) {
1994 try self.addErrorDetails(.{
1995 .err = .control_extra_data_size_exceeds_max,
1996 .token = control.type,
1997 });
1998 return self.addErrorDetailsAndFail(.{
1999 .err = .control_extra_data_size_exceeds_max,
2000 .type = .note,
2001 .token = data_expression.getFirstToken(),
2002 .token_span_end = data_expression.getLastToken(),
2003 });
2004 }
2005 }
2006 // We know the extra_data_buf size fits within a u16.
2007 const extra_data_size: u16 = @intCast(extra_data_buf.written().len);
2008 try data_writer.writeInt(u16, extra_data_size, .little);
2009 try data_writer.writeAll(extra_data_buf.written());
2010 }
2011
2012 pub fn writeToolbar(self: *Compiler, node: *Node.Toolbar, writer: *std.Io.Writer) !void {
2013 var data_buffer: std.Io.Writer.Allocating = .init(self.allocator);
2014 defer data_buffer.deinit();
2015 const data_writer = &data_buffer.writer;
2016
2017 const button_width = evaluateNumberExpression(node.button_width, self.source, self.input_code_pages);
2018 const button_height = evaluateNumberExpression(node.button_height, self.source, self.input_code_pages);
2019
2020 // I'm assuming this is some sort of version
2021 // TODO: Try to find something mentioning this
2022 try data_writer.writeInt(u16, 1, .little);
2023 try data_writer.writeInt(u16, button_width.asWord(), .little);
2024 try data_writer.writeInt(u16, button_height.asWord(), .little);
2025 // Number of buttons is guaranteed by the parser to be within maxInt(u16).
2026 try data_writer.writeInt(u16, @as(u16, @intCast(node.buttons.len)), .little);
2027
2028 for (node.buttons) |button_or_sep| {
2029 switch (button_or_sep.id) {
2030 .literal => { // This is always SEPARATOR
2031 std.debug.assert(button_or_sep.cast(.literal).?.token.id == .literal);
2032 try data_writer.writeInt(u16, 0, .little);
2033 },
2034 .simple_statement => {
2035 const value_node = button_or_sep.cast(.simple_statement).?.value;
2036 const value = evaluateNumberExpression(value_node, self.source, self.input_code_pages);
2037 try data_writer.writeInt(u16, value.asWord(), .little);
2038 },
2039 else => unreachable, // This is a bug in the parser
2040 }
2041 }
2042
2043 const data_size: u32 = @intCast(data_buffer.written().len);
2044 var header = try self.resourceHeader(node.id, node.type, .{
2045 .data_size = data_size,
2046 });
2047 defer header.deinit(self.allocator);
2048
2049 header.applyMemoryFlags(node.common_resource_attributes, self.source);
2050
2051 try header.write(writer, self.errContext(node.id));
2052
2053 var data_fbs: std.Io.Reader = .fixed(data_buffer.written());
2054 try writeResourceData(writer, &data_fbs, data_size);
2055 }
2056
2057 /// Weight and italic carry over from previous FONT statements within a single resource,
2058 /// so they need to be parsed ahead-of-time and stored
2059 const FontStatementValues = struct {
2060 weight: u16 = 0,
2061 italic: bool = false,
2062 node: *Node.FontStatement,
2063 };
2064
2065 pub fn writeDialogFont(self: *Compiler, resource: ResourceType, values: FontStatementValues, writer: *std.Io.Writer) !void {
2066 const node = values.node;
2067 const point_size = evaluateNumberExpression(node.point_size, self.source, self.input_code_pages);
2068 try writer.writeInt(u16, point_size.asWord(), .little);
2069
2070 if (resource == .dialogex) {
2071 try writer.writeInt(u16, values.weight, .little);
2072 }
2073
2074 if (resource == .dialogex) {
2075 try writer.writeInt(u8, @intFromBool(values.italic), .little);
2076 }
2077
2078 if (node.char_set) |char_set| {
2079 const value = evaluateNumberExpression(char_set, self.source, self.input_code_pages);
2080 try writer.writeInt(u8, @as(u8, @truncate(value.value)), .little);
2081 } else if (resource == .dialogex) {
2082 try writer.writeInt(u8, 1, .little); // DEFAULT_CHARSET
2083 }
2084
2085 const typeface = try self.parseQuotedStringAsWideString(node.typeface);
2086 defer self.allocator.free(typeface);
2087 try writer.writeAll(std.mem.sliceAsBytes(typeface[0 .. typeface.len + 1]));
2088 }
2089
2090 pub fn writeMenu(self: *Compiler, node: *Node.Menu, writer: *std.Io.Writer) !void {
2091 var data_buffer: std.Io.Writer.Allocating = .init(self.allocator);
2092 defer data_buffer.deinit();
2093
2094 const type_bytes = SourceBytes{
2095 .slice = node.type.slice(self.source),
2096 .code_page = self.input_code_pages.getForToken(node.type),
2097 };
2098 const resource = ResourceType.fromString(type_bytes);
2099 std.debug.assert(resource == .menu or resource == .menuex);
2100
2101 try self.writeMenuData(node, &data_buffer.writer, resource);
2102
2103 // TODO: Limit data_buffer in some way to error when writing more than u32 max bytes
2104 const data_size: u32 = std.math.cast(u32, data_buffer.written().len) orelse {
2105 return self.addErrorDetailsAndFail(.{
2106 .err = .resource_data_size_exceeds_max,
2107 .token = node.id,
2108 });
2109 };
2110 var header = try self.resourceHeader(node.id, node.type, .{
2111 .data_size = data_size,
2112 });
2113 defer header.deinit(self.allocator);
2114
2115 header.applyMemoryFlags(node.common_resource_attributes, self.source);
2116 header.applyOptionalStatements(node.optional_statements, self.source, self.input_code_pages);
2117
2118 try header.write(writer, self.errContext(node.id));
2119
2120 var data_fbs: std.Io.Reader = .fixed(data_buffer.written());
2121 try writeResourceData(writer, &data_fbs, data_size);
2122 }
2123
2124 /// Expects `data_writer` to be a LimitedWriter limited to u32, meaning all writes to
2125 /// the writer within this function could return error.NoSpaceLeft
2126 pub fn writeMenuData(self: *Compiler, node: *Node.Menu, data_writer: *std.Io.Writer, resource: ResourceType) !void {
2127 // menu header
2128 const version: u16 = if (resource == .menu) 0 else 1;
2129 try data_writer.writeInt(u16, version, .little);
2130 const header_size: u16 = if (resource == .menu) 0 else 4;
2131 try data_writer.writeInt(u16, header_size, .little); // cbHeaderSize
2132 // Note: There can be extra bytes at the end of this header (`rgbExtra`),
2133 // but they are always zero-length for us, so we don't write anything
2134 // (the length of the rgbExtra field is inferred from the header_size).
2135 // MENU => rgbExtra: [cbHeaderSize]u8
2136 // MENUEX => rgbExtra: [cbHeaderSize-4]u8
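        // In practice this means a MENU header is just {version=0, cbHeaderSize=0}, while a
        // MENUEX header is {version=1, cbHeaderSize=4} followed by the u32 help ID below.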
2137
2138 if (resource == .menuex) {
2139 if (node.help_id) |help_id_node| {
2140 const help_id = evaluateNumberExpression(help_id_node, self.source, self.input_code_pages);
2141 try data_writer.writeInt(u32, help_id.value, .little);
2142 } else {
2143 try data_writer.writeInt(u32, 0, .little);
2144 }
2145 }
2146
2147 for (node.items, 0..) |item, i| {
2148 const is_last = i == node.items.len - 1;
2149 try self.writeMenuItem(item, data_writer, is_last);
2150 }
2151 }
2152
2153 pub fn writeMenuItem(self: *Compiler, node: *Node, writer: *std.Io.Writer, is_last_of_parent: bool) !void {
2154 switch (node.id) {
2155 .menu_item_separator => {
2156             // This is the 'alternate compatibility form' of the separator, see
2157 // https://devblogs.microsoft.com/oldnewthing/20080710-00/?p=21673
2158 //
2159 // The 'correct' way is to set the MF_SEPARATOR flag, but the Win32 RC
2160 // compiler still uses this alternate form, so that's what we use too.
2161 var flags = res.MenuItemFlags{};
2162 if (is_last_of_parent) flags.markLast();
2163 try writer.writeInt(u16, flags.value, .little);
2164 try writer.writeInt(u16, 0, .little); // id
2165 try writer.writeInt(u16, 0, .little); // null-terminated UTF-16 text
2166 },
2167 .menu_item => {
2168 const menu_item: *Node.MenuItem = @alignCast(@fieldParentPtr("base", node));
2169 var flags = res.MenuItemFlags{};
2170 for (menu_item.option_list) |option_token| {
2171 // This failing would be a bug in the parser
2172 const option = rc.MenuItem.Option.map.get(option_token.slice(self.source)) orelse unreachable;
2173 flags.apply(option);
2174 }
2175 if (is_last_of_parent) flags.markLast();
2176 try writer.writeInt(u16, flags.value, .little);
2177
2178             const result = evaluateNumberExpression(menu_item.result, self.source, self.input_code_pages);
2179 try writer.writeInt(u16, result.asWord(), .little);
2180
2181             const text = try self.parseQuotedStringAsWideString(menu_item.text);
2182 defer self.allocator.free(text);
2183 try writer.writeAll(std.mem.sliceAsBytes(text[0 .. text.len + 1]));
2184 },
2185 .popup => {
2186 const popup: *Node.Popup = @alignCast(@fieldParentPtr("base", node));
2187 var flags = res.MenuItemFlags{ .value = res.MF.POPUP };
2188 for (popup.option_list) |option_token| {
2189 // This failing would be a bug in the parser
2190 const option = rc.MenuItem.Option.map.get(option_token.slice(self.source)) orelse unreachable;
2191 flags.apply(option);
2192 }
2193 if (is_last_of_parent) flags.markLast();
2194 try writer.writeInt(u16, flags.value, .little);
2195
2196             const text = try self.parseQuotedStringAsWideString(popup.text);
2197 defer self.allocator.free(text);
2198 try writer.writeAll(std.mem.sliceAsBytes(text[0 .. text.len + 1]));
2199
2200 for (popup.items, 0..) |item, i| {
2201 const is_last = i == popup.items.len - 1;
2202 try self.writeMenuItem(item, writer, is_last);
2203 }
2204 },
2205 inline .menu_item_ex, .popup_ex => |node_type| {
2206 const menu_item: *node_type.Type() = @alignCast(@fieldParentPtr("base", node));
2207
2208 if (menu_item.type) |flags| {
2209 const value = evaluateNumberExpression(flags, self.source, self.input_code_pages);
2210 try writer.writeInt(u32, value.value, .little);
2211 } else {
2212 try writer.writeInt(u32, 0, .little);
2213 }
2214
2215 if (menu_item.state) |state| {
2216 const value = evaluateNumberExpression(state, self.source, self.input_code_pages);
2217 try writer.writeInt(u32, value.value, .little);
2218 } else {
2219 try writer.writeInt(u32, 0, .little);
2220 }
2221
2222 if (menu_item.id) |id| {
2223 const value = evaluateNumberExpression(id, self.source, self.input_code_pages);
2224 try writer.writeInt(u32, value.value, .little);
2225 } else {
2226 try writer.writeInt(u32, 0, .little);
2227 }
2228
2229 var flags: u16 = 0;
2230 if (is_last_of_parent) flags |= comptime @as(u16, @intCast(res.MF.END));
2231             // This constant doesn't seem to have a named #define; it's different from MF_POPUP
2232 if (node_type == .popup_ex) flags |= 0x01;
2233 try writer.writeInt(u16, flags, .little);
2234
2235             const text = try self.parseQuotedStringAsWideString(menu_item.text);
2236 defer self.allocator.free(text);
2237 try writer.writeAll(std.mem.sliceAsBytes(text[0 .. text.len + 1]));
2238
2239 // Only the combination of the flags u16 and the text bytes can cause
2240 // non-DWORD alignment, so we can just use the byte length of those
2241 // two values to realign to DWORD alignment.
2242 const relevant_bytes = 2 + (text.len + 1) * 2;
2243 try writeDataPadding(writer, @intCast(relevant_bytes));
2244
2245 if (node_type == .popup_ex) {
2246 if (menu_item.help_id) |help_id_node| {
2247 const help_id = evaluateNumberExpression(help_id_node, self.source, self.input_code_pages);
2248 try writer.writeInt(u32, help_id.value, .little);
2249 } else {
2250 try writer.writeInt(u32, 0, .little);
2251 }
2252
2253 for (menu_item.items, 0..) |item, i| {
2254 const is_last = i == menu_item.items.len - 1;
2255 try self.writeMenuItem(item, writer, is_last);
2256 }
2257 }
2258 },
2259 else => unreachable,
2260 }
2261 }
2262
2263 pub fn writeVersionInfo(self: *Compiler, node: *Node.VersionInfo, writer: *std.Io.Writer) !void {
2264 // NOTE: The node's length field (which is inclusive of the length of all of its children) is a u16
2265 var data_buffer: std.Io.Writer.Allocating = .init(self.allocator);
2266 defer data_buffer.deinit();
2267 const data_writer = &data_buffer.writer;
2268
2269 try data_writer.writeInt(u16, 0, .little); // placeholder size
2270 try data_writer.writeInt(u16, res.FixedFileInfo.byte_len, .little);
2271 try data_writer.writeInt(u16, res.VersionNode.type_binary, .little);
2272 const key_bytes = std.mem.sliceAsBytes(res.FixedFileInfo.key[0 .. res.FixedFileInfo.key.len + 1]);
2273 try data_writer.writeAll(key_bytes);
2274 // The number of bytes written up to this point is always the same, since the name
2275 // of the node is a constant (FixedFileInfo.key). The total number of bytes
2276 // written so far is 38, so we need 2 padding bytes to get back to DWORD alignment
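        // (That 38 is the three u16 fields above (6 bytes) plus the 16 null-terminated
        // UTF-16 code units of the key (32 bytes).)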
2277 try data_writer.writeInt(u16, 0, .little);
2278
2279 var fixed_file_info = res.FixedFileInfo{};
2280 for (node.fixed_info) |fixed_info| {
2281 switch (fixed_info.id) {
2282 .version_statement => {
2283 const version_statement: *Node.VersionStatement = @alignCast(@fieldParentPtr("base", fixed_info));
2284 const version_type = rc.VersionInfo.map.get(version_statement.type.slice(self.source)).?;
2285
2286 // Ensure that all parts are cleared for each version, to properly account for
2287 // potential duplicate PRODUCTVERSION/FILEVERSION statements
2288 switch (version_type) {
2289 .file_version => @memset(&fixed_file_info.file_version.parts, 0),
2290 .product_version => @memset(&fixed_file_info.product_version.parts, 0),
2291 else => unreachable,
2292 }
2293
2294 for (version_statement.parts, 0..) |part, i| {
2295 const part_value = evaluateNumberExpression(part, self.source, self.input_code_pages);
2296 if (part_value.is_long) {
2297 try self.addErrorDetails(.{
2298 .err = .rc_would_error_u16_with_l_suffix,
2299 .type = .warning,
2300 .token = part.getFirstToken(),
2301 .token_span_end = part.getLastToken(),
2302 .extra = .{ .statement_with_u16_param = switch (version_type) {
2303 .file_version => .fileversion,
2304 .product_version => .productversion,
2305 else => unreachable,
2306 } },
2307 });
2308 try self.addErrorDetails(.{
2309 .err = .rc_would_error_u16_with_l_suffix,
2310 .print_source_line = false,
2311 .type = .note,
2312 .token = part.getFirstToken(),
2313 .token_span_end = part.getLastToken(),
2314 .extra = .{ .statement_with_u16_param = switch (version_type) {
2315 .file_version => .fileversion,
2316 .product_version => .productversion,
2317 else => unreachable,
2318 } },
2319 });
2320 }
2321 switch (version_type) {
2322 .file_version => {
2323 fixed_file_info.file_version.parts[i] = part_value.asWord();
2324 },
2325 .product_version => {
2326 fixed_file_info.product_version.parts[i] = part_value.asWord();
2327 },
2328 else => unreachable,
2329 }
2330 }
2331 },
2332 .simple_statement => {
2333 const statement: *Node.SimpleStatement = @alignCast(@fieldParentPtr("base", fixed_info));
2334 const statement_type = rc.VersionInfo.map.get(statement.identifier.slice(self.source)).?;
2335 const value = evaluateNumberExpression(statement.value, self.source, self.input_code_pages);
2336 switch (statement_type) {
2337 .file_flags_mask => fixed_file_info.file_flags_mask = value.value,
2338 .file_flags => fixed_file_info.file_flags = value.value,
2339 .file_os => fixed_file_info.file_os = value.value,
2340 .file_type => fixed_file_info.file_type = value.value,
2341 .file_subtype => fixed_file_info.file_subtype = value.value,
2342 else => unreachable,
2343 }
2344 },
2345 else => unreachable,
2346 }
2347 }
2348 try fixed_file_info.write(data_writer);
2349
2350 for (node.block_statements) |statement| {
2351 var overflow = false;
2352 self.writeVersionNode(statement, data_writer) catch |err| switch (err) {
2353 error.NoSpaceLeft => {
2354 overflow = true;
2355 },
2356 else => |e| return e,
2357 };
2358 if (overflow or data_buffer.written().len > std.math.maxInt(u16)) {
2359 try self.addErrorDetails(.{
2360 .err = .version_node_size_exceeds_max,
2361 .token = node.id,
2362 });
2363 return self.addErrorDetailsAndFail(.{
2364 .err = .version_node_size_exceeds_max,
2365 .type = .note,
2366 .token = statement.getFirstToken(),
2367 .token_span_end = statement.getLastToken(),
2368 });
2369 }
2370 }
2371
2372 // We know that data_buffer len is within the limits of a u16, since we check in the block
2373 // statements loop above which is the only place it can overflow.
2374 const data_size: u16 = @intCast(data_buffer.written().len);
2375 // And now that we know the full size of this node (including its children), set its size
2376 std.mem.writeInt(u16, data_buffer.written()[0..2], data_size, .little);
2377
2378 var header = try self.resourceHeader(node.id, node.versioninfo, .{
2379 .data_size = data_size,
2380 });
2381 defer header.deinit(self.allocator);
2382
2383 header.applyMemoryFlags(node.common_resource_attributes, self.source);
2384
2385 try header.write(writer, self.errContext(node.id));
2386
2387 var data_fbs: std.Io.Reader = .fixed(data_buffer.written());
2388 try writeResourceData(writer, &data_fbs, data_size);
2389 }
2390
2391 /// Assumes that `writer` is a Writer.Allocating (specifically, that buffered() returns all of the data written so far)
2392 /// TODO: This function could be nicer if writer was guaranteed to fail if it wrote more than u16 max bytes
2393 pub fn writeVersionNode(self: *Compiler, node: *Node, writer: *std.Io.Writer) !void {
2394         // We can assume that writer.buffered().len will never be able to exceed the limits of a u16
2395 try writeDataPadding(writer, std.math.cast(u16, writer.buffered().len) orelse return error.NoSpaceLeft);
2396
2397 const node_and_children_size_offset = writer.buffered().len;
2398 try writer.writeInt(u16, 0, .little); // placeholder for size
2399 const data_size_offset = writer.buffered().len;
2400 try writer.writeInt(u16, 0, .little); // placeholder for data size
2401 const data_type_offset = writer.buffered().len;
2402 // Data type is string unless the node contains values that are numbers.
2403 try writer.writeInt(u16, res.VersionNode.type_string, .little);
2404
2405 switch (node.id) {
2406 inline .block, .block_value => |node_type| {
2407 const block_or_value: *node_type.Type() = @alignCast(@fieldParentPtr("base", node));
2408 const parsed_key = try self.parseQuotedStringAsWideString(block_or_value.key);
2409 defer self.allocator.free(parsed_key);
2410
2411 const parsed_key_to_first_null = std.mem.sliceTo(parsed_key, 0);
2412 try writer.writeAll(std.mem.sliceAsBytes(parsed_key_to_first_null[0 .. parsed_key_to_first_null.len + 1]));
2413
2414 var has_number_value: bool = false;
2415 for (block_or_value.values) |value_value_node_uncasted| {
2416 const value_value_node = value_value_node_uncasted.cast(.block_value_value).?;
2417 if (value_value_node.expression.isNumberExpression()) {
2418 has_number_value = true;
2419 break;
2420 }
2421 }
2422 // The units used here are dependent on the type. If there are any numbers, then
2423 // this is a byte count. If there are only strings, then this is a count of
2424 // UTF-16 code units.
2425 //
2426 // The Win32 RC compiler miscompiles this count in the case of values that
2427 // have a mix of numbers and strings. This is detected and a warning is emitted
2428 // during parsing, so we can just do the correct thing here.
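                // For example, `VALUE "key", "abc"` stores a count of 4 (3 code units plus the
                // terminator), while any value list containing a number stores a byte count instead.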
2429 var values_size: usize = 0;
2430
2431 try writeDataPadding(writer, std.math.cast(u16, writer.buffered().len) orelse return error.NoSpaceLeft);
2432
2433 for (block_or_value.values, 0..) |value_value_node_uncasted, i| {
2434 const value_value_node = value_value_node_uncasted.cast(.block_value_value).?;
2435 const value_node = value_value_node.expression;
2436 if (value_node.isNumberExpression()) {
2437 const number = evaluateNumberExpression(value_node, self.source, self.input_code_pages);
2438 // This is used to write u16 or u32 depending on the number's suffix
2439 const data_wrapper = Data{ .number = number };
2440 try data_wrapper.write(writer);
2441 // Numbers use byte count
2442 values_size += if (number.is_long) 4 else 2;
2443 } else {
2444 std.debug.assert(value_node.isStringLiteral());
2445 const literal_node = value_node.cast(.literal).?;
2446 const parsed_value = try self.parseQuotedStringAsWideString(literal_node.token);
2447 defer self.allocator.free(parsed_value);
2448
2449 const parsed_to_first_null = std.mem.sliceTo(parsed_value, 0);
2450 try writer.writeAll(std.mem.sliceAsBytes(parsed_to_first_null));
2451 // Strings use UTF-16 code-unit count including the null-terminator, but
2452 // only if there are no number values in the list.
2453 var value_size = parsed_to_first_null.len;
2454 if (has_number_value) value_size *= 2; // 2 bytes per UTF-16 code unit
2455 values_size += value_size;
2456                         // The null-terminator is only included if there's a trailing comma
2457                         // or this is the last value. If the value evaluates to empty and it is
2458                         // the only value, then it never gets a null terminator. If there was an
2459                         // explicit null-terminator in the string, we still need to potentially
2460                         // add one since we already sliced to the terminator.
2461 const is_last = i == block_or_value.values.len - 1;
2462 const is_empty = parsed_to_first_null.len == 0;
2463 const is_only = block_or_value.values.len == 1;
2464 if ((!is_empty or !is_only) and (is_last or value_value_node.trailing_comma)) {
2465 try writer.writeInt(u16, 0, .little);
2466 values_size += if (has_number_value) 2 else 1;
2467 }
2468 }
2469 }
2470                 const data_size_slice = writer.buffered()[data_size_offset..];
2471 std.mem.writeInt(u16, data_size_slice[0..@sizeOf(u16)], @as(u16, @intCast(values_size)), .little);
2472
2473 if (has_number_value) {
2474 const data_type_slice = writer.buffered()[data_type_offset..];
2475 std.mem.writeInt(u16, data_type_slice[0..@sizeOf(u16)], res.VersionNode.type_binary, .little);
2476 }
2477
2478 if (node_type == .block) {
2479 const block = block_or_value;
2480 for (block.children) |child| {
2481 try self.writeVersionNode(child, writer);
2482 }
2483 }
2484 },
2485 else => unreachable,
2486 }
2487
2488 const node_and_children_size = writer.buffered().len - node_and_children_size_offset;
2489 const node_and_children_size_slice = writer.buffered()[node_and_children_size_offset..];
2490 std.mem.writeInt(u16, node_and_children_size_slice[0..@sizeOf(u16)], @as(u16, @intCast(node_and_children_size)), .little);
2491 }
2492
2493 pub fn writeStringTable(self: *Compiler, node: *Node.StringTable) !void {
2494 const language = getLanguageFromOptionalStatements(node.optional_statements, self.source, self.input_code_pages) orelse self.state.language;
2495
2496 for (node.strings) |string_node| {
2497 const string: *Node.StringTableString = @alignCast(@fieldParentPtr("base", string_node));
2498 const string_id_data = try self.evaluateDataExpression(string.id);
2499 const string_id = string_id_data.number.asWord();
2500
2501 self.state.string_tables.set(
2502 self.arena,
2503 language,
2504 string_id,
2505 string.string,
2506 &node.base,
2507 self.source,
2508 self.input_code_pages,
2509 self.state.version,
2510 self.state.characteristics,
2511 ) catch |err| switch (err) {
2512 error.StringAlreadyDefined => {
2513 // It might be nice to have these errors point to the ids rather than the
2514 // string tokens, but that would mean storing the id token of each string
2515 // which doesn't seem worth it just for slightly better error messages.
2516 try self.addErrorDetails(.{
2517 .err = .string_already_defined,
2518 .token = string.string,
2519 .extra = .{ .string_and_language = .{ .id = string_id, .language = language } },
2520 });
2521 const existing_def_table = self.state.string_tables.tables.getPtr(language).?;
2522 const existing_definition = existing_def_table.get(string_id).?;
2523 return self.addErrorDetailsAndFail(.{
2524 .err = .string_already_defined,
2525 .type = .note,
2526 .token = existing_definition,
2527 .extra = .{ .string_and_language = .{ .id = string_id, .language = language } },
2528 });
2529 },
2530 error.OutOfMemory => |e| return e,
2531 };
2532 }
2533 }
2534
2535 /// Expects this to be a top-level LANGUAGE statement
2536 pub fn writeLanguageStatement(self: *Compiler, node: *Node.LanguageStatement) void {
2537 const primary = Compiler.evaluateNumberExpression(node.primary_language_id, self.source, self.input_code_pages);
2538 const sublanguage = Compiler.evaluateNumberExpression(node.sublanguage_id, self.source, self.input_code_pages);
2539 self.state.language.primary_language_id = @truncate(primary.value);
2540 self.state.language.sublanguage_id = @truncate(sublanguage.value);
2541 }
2542
2543 /// Expects this to be a top-level VERSION or CHARACTERISTICS statement
2544 pub fn writeTopLevelSimpleStatement(self: *Compiler, node: *Node.SimpleStatement) void {
2545 const value = Compiler.evaluateNumberExpression(node.value, self.source, self.input_code_pages);
2546 const statement_type = rc.TopLevelKeywords.map.get(node.identifier.slice(self.source)).?;
2547 switch (statement_type) {
2548 .characteristics => self.state.characteristics = value.value,
2549 .version => self.state.version = value.value,
2550 else => unreachable,
2551 }
2552 }
2553
2554 pub const ResourceHeaderOptions = struct {
2555 language: ?res.Language = null,
2556 data_size: DWORD = 0,
2557 };
2558
2559 pub fn resourceHeader(self: *Compiler, id_token: Token, type_token: Token, options: ResourceHeaderOptions) !ResourceHeader {
2560 const id_bytes = self.sourceBytesForToken(id_token);
2561 const type_bytes = self.sourceBytesForToken(type_token);
2562 return ResourceHeader.init(
2563 self.allocator,
2564 id_bytes,
2565 type_bytes,
2566 options.data_size,
2567 options.language orelse self.state.language,
2568 self.state.version,
2569 self.state.characteristics,
2570 ) catch |err| switch (err) {
2571 error.OutOfMemory => |e| return e,
2572 error.TypeNonAsciiOrdinal => {
2573 const win32_rc_ordinal = NameOrOrdinal.maybeNonAsciiOrdinalFromString(type_bytes).?;
2574 try self.addErrorDetails(.{
2575 .err = .invalid_digit_character_in_ordinal,
2576 .type = .err,
2577 .token = type_token,
2578 });
2579 return self.addErrorDetailsAndFail(.{
2580 .err = .win32_non_ascii_ordinal,
2581 .type = .note,
2582 .token = type_token,
2583 .print_source_line = false,
2584 .extra = .{ .number = win32_rc_ordinal.ordinal },
2585 });
2586 },
2587 error.IdNonAsciiOrdinal => {
2588 const win32_rc_ordinal = NameOrOrdinal.maybeNonAsciiOrdinalFromString(id_bytes).?;
2589 try self.addErrorDetails(.{
2590 .err = .invalid_digit_character_in_ordinal,
2591 .type = .err,
2592 .token = id_token,
2593 });
2594 return self.addErrorDetailsAndFail(.{
2595 .err = .win32_non_ascii_ordinal,
2596 .type = .note,
2597 .token = id_token,
2598 .print_source_line = false,
2599 .extra = .{ .number = win32_rc_ordinal.ordinal },
2600 });
2601 },
2602 };
2603 }
2604
2605 pub const ResourceHeader = struct {
2606 name_value: NameOrOrdinal,
2607 type_value: NameOrOrdinal,
2608 language: res.Language,
2609 memory_flags: MemoryFlags,
2610 data_size: DWORD,
2611 version: DWORD,
2612 characteristics: DWORD,
2613 data_version: DWORD = 0,
2614
2615 pub const InitError = error{ OutOfMemory, IdNonAsciiOrdinal, TypeNonAsciiOrdinal };
2616
2617 pub fn init(allocator: Allocator, id_bytes: SourceBytes, type_bytes: SourceBytes, data_size: DWORD, language: res.Language, version: DWORD, characteristics: DWORD) InitError!ResourceHeader {
2618 const type_value = type: {
2619 const resource_type = ResourceType.fromString(type_bytes);
2620 if (res.RT.fromResource(resource_type)) |rt_constant| {
2621 break :type NameOrOrdinal{ .ordinal = @intFromEnum(rt_constant) };
2622 } else {
2623 break :type try NameOrOrdinal.fromString(allocator, type_bytes);
2624 }
2625 };
2626 errdefer type_value.deinit(allocator);
2627 if (type_value == .name) {
2628 if (NameOrOrdinal.maybeNonAsciiOrdinalFromString(type_bytes)) |_| {
2629 return error.TypeNonAsciiOrdinal;
2630 }
2631 }
2632
2633 const name_value = try NameOrOrdinal.fromString(allocator, id_bytes);
2634 errdefer name_value.deinit(allocator);
2635 if (name_value == .name) {
2636 if (NameOrOrdinal.maybeNonAsciiOrdinalFromString(id_bytes)) |_| {
2637 return error.IdNonAsciiOrdinal;
2638 }
2639 }
2640
2641 const predefined_resource_type = type_value.predefinedResourceType();
2642
2643 return ResourceHeader{
2644 .name_value = name_value,
2645 .type_value = type_value,
2646 .data_size = data_size,
2647 .memory_flags = MemoryFlags.defaults(predefined_resource_type),
2648 .language = language,
2649 .version = version,
2650 .characteristics = characteristics,
2651 };
2652 }
2653
2654 pub fn deinit(self: ResourceHeader, allocator: Allocator) void {
2655 self.name_value.deinit(allocator);
2656 self.type_value.deinit(allocator);
2657 }
2658
2659 pub const SizeInfo = struct {
2660 bytes: u32,
2661 padding_after_name: u2,
2662 };
2663
2664 pub fn calcSize(self: ResourceHeader) error{Overflow}!SizeInfo {
2665 var header_size: u32 = 8;
2666 header_size = try std.math.add(
2667 u32,
2668 header_size,
2669 std.math.cast(u32, self.name_value.byteLen()) orelse return error.Overflow,
2670 );
2671 header_size = try std.math.add(
2672 u32,
2673 header_size,
2674 std.math.cast(u32, self.type_value.byteLen()) orelse return error.Overflow,
2675 );
2676 const padding_after_name = numPaddingBytesNeeded(header_size);
2677 header_size = try std.math.add(u32, header_size, padding_after_name);
2678 header_size = try std.math.add(u32, header_size, 16);
2679 return .{ .bytes = header_size, .padding_after_name = padding_after_name };
2680 }
2681
2682 pub fn writeAssertNoOverflow(self: ResourceHeader, writer: *std.Io.Writer) !void {
2683 return self.writeSizeInfo(writer, self.calcSize() catch unreachable);
2684 }
2685
2686 pub fn write(self: ResourceHeader, writer: *std.Io.Writer, err_ctx: errors.DiagnosticsContext) !void {
2687 const size_info = self.calcSize() catch {
2688 try err_ctx.diagnostics.append(.{
2689 .err = .resource_data_size_exceeds_max,
2690 .code_page = err_ctx.code_page,
2691 .token = err_ctx.token,
2692 });
2693 return error.CompileError;
2694 };
2695 return self.writeSizeInfo(writer, size_info);
2696 }
2697
2698 pub fn writeSizeInfo(self: ResourceHeader, writer: *std.Io.Writer, size_info: SizeInfo) !void {
2699 try writer.writeInt(DWORD, self.data_size, .little); // DataSize
2700 try writer.writeInt(DWORD, size_info.bytes, .little); // HeaderSize
2701 try self.type_value.write(writer); // TYPE
2702 try self.name_value.write(writer); // NAME
2703 try writer.splatByteAll(0, size_info.padding_after_name);
2704
2705 try writer.writeInt(DWORD, self.data_version, .little); // DataVersion
2706 try writer.writeInt(WORD, self.memory_flags.value, .little); // MemoryFlags
2707 try writer.writeInt(WORD, self.language.asInt(), .little); // LanguageId
2708 try writer.writeInt(DWORD, self.version, .little); // Version
2709 try writer.writeInt(DWORD, self.characteristics, .little); // Characteristics
2710 }
2711
2712 pub fn predefinedResourceType(self: ResourceHeader) ?res.RT {
2713 return self.type_value.predefinedResourceType();
2714 }
2715
2716 pub fn applyMemoryFlags(self: *ResourceHeader, tokens: []Token, source: []const u8) void {
2717 applyToMemoryFlags(&self.memory_flags, tokens, source);
2718 }
2719
2720 pub fn applyOptionalStatements(self: *ResourceHeader, statements: []*Node, source: []const u8, code_page_lookup: *const CodePageLookup) void {
2721 applyToOptionalStatements(&self.language, &self.version, &self.characteristics, statements, source, code_page_lookup);
2722 }
2723 };

    fn applyToMemoryFlags(flags: *MemoryFlags, tokens: []Token, source: []const u8) void {
        for (tokens) |token| {
            const attribute = rc.CommonResourceAttributes.map.get(token.slice(source)).?;
            flags.set(attribute);
        }
    }

    /// RT_GROUP_ICON and RT_GROUP_CURSOR have their own special rules for memory flags
    fn applyToGroupMemoryFlags(flags: *MemoryFlags, tokens: []Token, source: []const u8) void {
        // There's probably a cleaner implementation of this, but this will result in the same
        // flags as the Win32 RC compiler for all 986,410 K-permutations of memory flags
        // for an ICON resource.
        //
        // This was arrived at by iterating over the permutations and creating a
        // list where each line looks something like this:
        //   MOVEABLE PRELOAD -> 0x1050 (MOVEABLE|PRELOAD|DISCARDABLE)
        //
        // and then noticing a few things:

        // 1. Any permutation that does not have PRELOAD in it just uses the
        //    default flags.
        const initial_flags = flags.*;
        var flags_set = std.enums.EnumSet(rc.CommonResourceAttributes).initEmpty();
        for (tokens) |token| {
            const attribute = rc.CommonResourceAttributes.map.get(token.slice(source)).?;
            flags_set.insert(attribute);
        }
        if (!flags_set.contains(.preload)) return;

        // 2. Any permutation of flags where applying only the PRELOAD and LOADONCALL flags
        //    results in no actual change by the end will just use the default flags.
        //    For example, `PRELOAD LOADONCALL` will result in default flags, but
        //    `LOADONCALL PRELOAD` will have PRELOAD set after they are both applied in order.
        for (tokens) |token| {
            const attribute = rc.CommonResourceAttributes.map.get(token.slice(source)).?;
            switch (attribute) {
                .preload, .loadoncall => flags.set(attribute),
                else => {},
            }
        }
        if (flags.value == initial_flags.value) return;

        // 3. If none of DISCARDABLE, SHARED, or PURE is specified, then PRELOAD
        //    implies `flags &= ~SHARED` and LOADONCALL implies `flags |= SHARED`
        const shared_set = comptime blk: {
            var set = std.enums.EnumSet(rc.CommonResourceAttributes).initEmpty();
            set.insert(.discardable);
            set.insert(.shared);
            set.insert(.pure);
            break :blk set;
        };
        const discardable_shared_or_pure_specified = flags_set.intersectWith(shared_set).count() != 0;
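        // `MemoryFlags.setGroup` is assumed to take a bool controlling whether the rule-3
        // SHARED adjustment is applied, i.e. only when none of DISCARDABLE/SHARED/PURE
        // were explicitly specified.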
        for (tokens) |token| {
            const attribute = rc.CommonResourceAttributes.map.get(token.slice(source)).?;
            flags.setGroup(attribute, !discardable_shared_or_pure_specified);
        }
    }

    /// Only handles the 'base' optional statements that are shared between resource types.
    fn applyToOptionalStatements(language: *res.Language, version: *u32, characteristics: *u32, statements: []*Node, source: []const u8, code_page_lookup: *const CodePageLookup) void {
        for (statements) |node| switch (node.id) {
            .language_statement => {
                const language_statement: *Node.LanguageStatement = @alignCast(@fieldParentPtr("base", node));
                language.* = languageFromLanguageStatement(language_statement, source, code_page_lookup);
            },
            .simple_statement => {
                const simple_statement: *Node.SimpleStatement = @alignCast(@fieldParentPtr("base", node));
                const statement_type = rc.OptionalStatements.map.get(simple_statement.identifier.slice(source)) orelse continue;
                const result = Compiler.evaluateNumberExpression(simple_statement.value, source, code_page_lookup);
                switch (statement_type) {
                    .version => version.* = result.value,
                    .characteristics => characteristics.* = result.value,
                    else => unreachable, // only VERSION and CHARACTERISTICS should be in an optional statements list
                }
            },
            else => {},
        };
    }

    pub fn languageFromLanguageStatement(language_statement: *const Node.LanguageStatement, source: []const u8, code_page_lookup: *const CodePageLookup) res.Language {
        const primary = Compiler.evaluateNumberExpression(language_statement.primary_language_id, source, code_page_lookup);
        const sublanguage = Compiler.evaluateNumberExpression(language_statement.sublanguage_id, source, code_page_lookup);
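        // res.Language is assumed to follow the standard LANGID layout (primary language ID
        // in the low 10 bits, sublanguage ID in the next 6), so the @truncate calls below
        // simply drop any excess bits from the evaluated expressions.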
        return .{
            .primary_language_id = @truncate(primary.value),
            .sublanguage_id = @truncate(sublanguage.value),
        };
    }

    pub fn getLanguageFromOptionalStatements(statements: []*Node, source: []const u8, code_page_lookup: *const CodePageLookup) ?res.Language {
        for (statements) |node| switch (node.id) {
            .language_statement => {
                const language_statement: *Node.LanguageStatement = @alignCast(@fieldParentPtr("base", node));
                return languageFromLanguageStatement(language_statement, source, code_page_lookup);
            },
            else => continue,
        };
        return null;
    }

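    /// Writes a resource header whose name and type are ordinal 0 and whose remaining fields
    /// are all zero. A 32-bit .RES file conventionally starts with such an empty resource
    /// entry, which is presumably what this is used for.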
    pub fn writeEmptyResource(writer: *std.Io.Writer) !void {
        const header = ResourceHeader{
            .name_value = .{ .ordinal = 0 },
            .type_value = .{ .ordinal = 0 },
            .language = .{
                .primary_language_id = 0,
                .sublanguage_id = 0,
            },
            .memory_flags = .{ .value = 0 },
            .data_size = 0,
            .version = 0,
            .characteristics = 0,
        };
        try header.writeAssertNoOverflow(writer);
    }

    pub fn sourceBytesForToken(self: *Compiler, token: Token) SourceBytes {
        return .{
            .slice = token.slice(self.source),
            .code_page = self.input_code_pages.getForToken(token),
        };
    }

    /// Helper that calls parseQuotedStringAsWideString with the relevant context
    /// Resulting slice is allocated by `self.allocator`.
    pub fn parseQuotedStringAsWideString(self: *Compiler, token: Token) ![:0]u16 {
        return literals.parseQuotedStringAsWideString(
            self.allocator,
            self.sourceBytesForToken(token),
            .{
                .start_column = token.calculateColumn(self.source, 8, null),
                .diagnostics = self.errContext(token),
                .output_code_page = self.output_code_pages.getForToken(token),
            },
        );
    }

    fn addErrorDetailsWithCodePage(self: *Compiler, details: ErrorDetails) Allocator.Error!void {
        try self.diagnostics.append(details);
    }

    /// Code page is looked up in input_code_pages using the token
    fn addErrorDetails(self: *Compiler, details_without_code_page: errors.ErrorDetailsWithoutCodePage) Allocator.Error!void {
        const details = ErrorDetails{
            .err = details_without_code_page.err,
            .code_page = self.input_code_pages.getForToken(details_without_code_page.token),
            .token = details_without_code_page.token,
            .token_span_start = details_without_code_page.token_span_start,
            .token_span_end = details_without_code_page.token_span_end,
            .type = details_without_code_page.type,
            .print_source_line = details_without_code_page.print_source_line,
            .extra = details_without_code_page.extra,
        };
        try self.addErrorDetailsWithCodePage(details);
    }

    /// Code page is looked up in input_code_pages using the token
    fn addErrorDetailsAndFail(self: *Compiler, details_without_code_page: errors.ErrorDetailsWithoutCodePage) error{ CompileError, OutOfMemory } {
        try self.addErrorDetails(details_without_code_page);
        return error.CompileError;
    }

    fn errContext(self: *Compiler, token: Token) errors.DiagnosticsContext {
        return .{
            .diagnostics = self.diagnostics,
            .token = token,
            .code_page = self.input_code_pages.getForToken(token),
        };
    }
};

pub const OpenSearchPathError = std.fs.Dir.OpenError;

fn openSearchPathDir(dir: std.fs.Dir, path: []const u8) OpenSearchPathError!std.fs.Dir {
    // Validate the search path to avoid possible unreachable on invalid paths,
    // see https://github.com/ziglang/zig/issues/15607 for why this is currently necessary.
    try validateSearchPath(path);
    return dir.openDir(path, .{});
}

/// Very crude attempt at validating a path. This is imperfect
/// and AFAIK it is effectively impossible to implement perfect path
/// validation, since it ultimately depends on the underlying filesystem.
/// Note that this function won't be necessary if/when
/// https://github.com/ziglang/zig/issues/15607
/// is accepted/implemented.
fn validateSearchPath(path: []const u8) error{BadPathName}!void {
    switch (builtin.os.tag) {
        .windows => {
            // This will return error.BadPathName on non-Win32 namespaced paths
            // (e.g. the NT \??\ prefix, the device \\.\ prefix, etc).
            // Rejecting those path types is an unavoidable limitation here, since
            // letting them through could still hit unreachable during the openDir call.
            var component_iterator = try std.fs.path.componentIterator(path);
            while (component_iterator.next()) |component| {
                // https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file
                if (std.mem.indexOfAny(u8, component.name, "\x00<>:\"|?*") != null) return error.BadPathName;
            }
        },
        else => {
            if (std.mem.indexOfScalar(u8, path, 0) != null) return error.BadPathName;
        },
    }
}
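
// A minimal sanity-check sketch for validateSearchPath: the embedded-NUL rejection applies on
// every OS, while the '?' rejection only comes from the Windows branch above.
test "validateSearchPath" {
    try std.testing.expectError(error.BadPathName, validateSearchPath("inc\x00lude"));
    if (builtin.os.tag == .windows) {
        try std.testing.expectError(error.BadPathName, validateSearchPath("inc?lude"));
    }
    try validateSearchPath("include");
}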

pub const SearchDir = struct {
    dir: std.fs.Dir,
    path: ?[]const u8,

    pub fn deinit(self: *SearchDir, allocator: Allocator) void {
        self.dir.close();
        if (self.path) |path| {
            allocator.free(path);
        }
    }
};

pub const FontDir = struct {
    fonts: std.ArrayList(Font) = .empty,
    /// To keep track of which ids are set and where they were set from
    ids: std.AutoHashMapUnmanaged(u16, Token) = .empty,

    pub const Font = struct {
        id: u16,
        header_bytes: [148]u8,
    };

    pub fn deinit(self: *FontDir, allocator: Allocator) void {
        self.fonts.deinit(allocator);
        self.ids.deinit(allocator);
    }

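    /// Assumes the caller has already checked `ids` for duplicates (and reported an error if
    /// needed): `putNoClobber` asserts that `font.id` is not already present in the map.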
    pub fn add(self: *FontDir, allocator: Allocator, font: Font, id_token: Token) !void {
        try self.ids.putNoClobber(allocator, font.id, id_token);
        try self.fonts.append(allocator, font);
    }

    pub fn writeResData(self: *FontDir, compiler: *Compiler, writer: *std.Io.Writer) !void {
        if (self.fonts.items.len == 0) return;

        // We know the number of fonts is limited to maxInt(u16) because fonts
        // must have a valid and unique u16 ordinal ID (trying to specify a FONT
        // with e.g. id 65537 will wrap around to 1 and be ignored if there's already
        // a font with that ID in the file).
        const num_fonts: u16 = @intCast(self.fonts.items.len);

        // u16 count + [(u16 id + 150 bytes) for each font]
        // Note: This works out to a maximum data_size of 9,961,322.
        const data_size: u32 = 2 + (2 + 150) * num_fonts;

        var header = Compiler.ResourceHeader{
            .name_value = try NameOrOrdinal.nameFromString(compiler.allocator, .{ .slice = "FONTDIR", .code_page = .windows1252 }),
            .type_value = NameOrOrdinal{ .ordinal = @intFromEnum(res.RT.FONTDIR) },
            .memory_flags = res.MemoryFlags.defaults(res.RT.FONTDIR),
            .language = compiler.state.language,
            .version = compiler.state.version,
            .characteristics = compiler.state.characteristics,
            .data_size = data_size,
        };
        defer header.deinit(compiler.allocator);

        try header.writeAssertNoOverflow(writer);
        try writer.writeInt(u16, num_fonts, .little);
        for (self.fonts.items) |font| {
            // The format of the FONTDIR is a strange beast.
            // Technically, each FONT is seemingly meant to be written as a
            // FONTDIRENTRY with two trailing NUL-terminated strings corresponding to
            // the 'device name' and 'face name' of the .FNT file, but:
            //
            // 1. When dealing with .FNT files, the Win32 implementation
            //    gets the device name and face name from the wrong locations,
            //    so it's basically never going to write the real device/face name
            //    strings.
            // 2. When dealing with files 76-140 bytes long, the Win32 implementation
            //    can just crash (if there are no NUL bytes in the file).
            // 3. The 32-bit Win32 rc.exe uses a 148 byte size for the portion of
            //    the FONTDIRENTRY before the NUL-terminated strings, which
            //    does not match the documented FONTDIRENTRY size that (presumably)
            //    this format is meant to be using, so anything iterating the
            //    FONTDIR according to the available documentation will get bogus results.
            // 4. The FONT resource can be used for non-.FNT types like TTF and OTF,
            //    in which case emulating the Win32 behavior of unconditionally
            //    interpreting the bytes as a .FNT and trying to grab device/face names
            //    from random bytes in the TTF/OTF file can lead to weird behavior
            //    and errors in the Win32 implementation (for example, the device/face
            //    name fields are offsets into the file where the NUL-terminated
            //    string is located, but the Win32 implementation actually treats
            //    them as signed so if they are negative then the Win32 implementation
            //    will error; this happening for TTF fonts would just be a bug
            //    since the TTF could otherwise be valid)
            // 5. The FONTDIR resource doesn't actually seem to be used at all by
            //    anything that I've found, and instead in Windows 3.0 and newer
            //    it seems like the FONT resources are always just iterated/accessed
            //    directly without ever looking at the FONTDIR.
            //
            // All of these combined means that we:
            // - Do not need or want to emulate Win32 behavior here
            // - For maximum simplicity and compatibility, we just write the first
            //   148 bytes of the file without any interpretation (padded with
            //   zeroes to get up to 148 bytes if necessary), and then
            //   unconditionally write two NUL bytes, meaning that we always
            //   write 'device name' and 'face name' as if they were 0-length
            //   strings.
            //
            // This gives us byte-for-byte .RES compatibility in the common case while
            // allowing us to avoid any erroneous errors caused by trying to read
            // the face/device name from a bogus location. Note that the Win32
            // implementation never actually writes the real device/face name here
            // anyway (except in the bizarre case that a .FNT file has the proper
            // device/face name offsets within a reserved section of the .FNT file)
            // so there's no feasible way that anything can actually think that the
            // device name/face name in the FONTDIR is reliable.

            // The ID is written first, then the 148 header bytes, then the two NUL bytes.
            try writer.writeInt(u16, font.id, .little);
            try writer.writeAll(&font.header_bytes);
            try writer.splatByteAll(0, 2);
        }
        try Compiler.writeDataPadding(writer, data_size);
    }
};

pub const StringTablesByLanguage = struct {
    /// String tables for each language are written to the .res file in order depending on
    /// when the first STRINGTABLE for the language was defined, and all blocks for a given
    /// language are written contiguously.
    /// Using an ArrayHashMap here gives us this property for free.
    tables: std.AutoArrayHashMapUnmanaged(res.Language, StringTable) = .empty,

    pub fn deinit(self: *StringTablesByLanguage, allocator: Allocator) void {
        self.tables.deinit(allocator);
    }

    pub fn set(
        self: *StringTablesByLanguage,
        allocator: Allocator,
        language: res.Language,
        id: u16,
        string_token: Token,
        node: *Node,
        source: []const u8,
        code_page_lookup: *const CodePageLookup,
        version: u32,
        characteristics: u32,
    ) StringTable.SetError!void {
        var get_or_put_result = try self.tables.getOrPut(allocator, language);
        if (!get_or_put_result.found_existing) {
            get_or_put_result.value_ptr.* = StringTable{};
        }
        return get_or_put_result.value_ptr.set(allocator, id, string_token, node, source, code_page_lookup, version, characteristics);
    }
};

pub const StringTable = struct {
    /// Blocks are written to the .res file in order depending on when the first string
    /// was added to the block (i.e. `STRINGTABLE { 16 "b" 0 "a" }` would then get written
    /// with block ID 2 (the one with "b") first and block ID 1 (the one with "a") second).
    /// Using an ArrayHashMap here gives us this property for free.
    blocks: std.AutoArrayHashMapUnmanaged(u16, Block) = .empty,

    pub const Block = struct {
        strings: std.ArrayList(Token) = .empty,
        set_indexes: std.bit_set.IntegerBitSet(16) = .{ .mask = 0 },
        memory_flags: MemoryFlags = MemoryFlags.defaults(res.RT.STRING),
        characteristics: u32,
        version: u32,

        /// Returns the index to insert the string into the `strings` list.
        /// Returns null if the string should be appended.
        fn getInsertionIndex(self: *Block, index: u8) ?u8 {
            std.debug.assert(!self.set_indexes.isSet(index));

            const first_set = self.set_indexes.findFirstSet() orelse return null;
            if (first_set > index) return 0;

            const last_set = 15 - @clz(self.set_indexes.mask);
            if (index > last_set) return null;

            var bit = first_set + 1;
            var insertion_index: u8 = 1;
            while (bit != index) : (bit += 1) {
                if (self.set_indexes.isSet(bit)) insertion_index += 1;
            }
            return insertion_index;
        }

        fn getTokenIndex(self: *Block, string_index: u8) ?u8 {
            const count = self.strings.items.len;
            if (count == 0) return null;
            if (count == 1) return 0;

            const first_set = self.set_indexes.findFirstSet() orelse unreachable;
            if (first_set == string_index) return 0;
            const last_set = 15 - @clz(self.set_indexes.mask);
            if (last_set == string_index) return @intCast(count - 1);

            if (first_set == last_set) return null;

            var bit = first_set + 1;
            var token_index: u8 = 1;
            while (bit < last_set) : (bit += 1) {
                if (!self.set_indexes.isSet(bit)) continue;
                if (bit == string_index) return token_index;
                token_index += 1;
            }
            return null;
        }

        fn dump(self: *Block) void {
            var bit_it = self.set_indexes.iterator(.{});
            var string_index: usize = 0;
            while (bit_it.next()) |bit_index| {
                const token = self.strings.items[string_index];
                std.debug.print("{}: [{}] {any}\n", .{ bit_index, string_index, token });
                string_index += 1;
            }
        }

        pub fn applyAttributes(self: *Block, string_table: *Node.StringTable, source: []const u8, code_page_lookup: *const CodePageLookup) void {
            Compiler.applyToMemoryFlags(&self.memory_flags, string_table.common_resource_attributes, source);
            var dummy_language: res.Language = undefined;
            Compiler.applyToOptionalStatements(&dummy_language, &self.version, &self.characteristics, string_table.optional_statements, source, code_page_lookup);
        }

        fn trimToDoubleNUL(comptime T: type, str: []const T) []const T {
            var last_was_null = false;
            for (str, 0..) |c, i| {
                if (c == 0) {
                    if (last_was_null) return str[0 .. i - 1];
                    last_was_null = true;
                } else {
                    last_was_null = false;
                }
            }
            return str;
        }

        test "trimToDoubleNUL" {
            try std.testing.expectEqualStrings("a\x00b", trimToDoubleNUL(u8, "a\x00b"));
            try std.testing.expectEqualStrings("a", trimToDoubleNUL(u8, "a\x00\x00b"));
        }
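
        // A small sanity-check sketch of getInsertionIndex; the expected values below are
        // worked out by hand from the logic above.
        test "getInsertionIndex" {
            var block = Block{ .version = 0, .characteristics = 0 };
            // Nothing set yet: append.
            try std.testing.expectEqual(@as(?u8, null), block.getInsertionIndex(5));
            block.set_indexes.set(5);
            // Before the first set index: insert at the front.
            try std.testing.expectEqual(@as(?u8, 0), block.getInsertionIndex(3));
            // After the last set index: append.
            try std.testing.expectEqual(@as(?u8, null), block.getInsertionIndex(10));
            block.set_indexes.set(3);
            block.set_indexes.set(10);
            // Two set indexes (3 and 5) precede index 7, so it is inserted at position 2.
            try std.testing.expectEqual(@as(?u8, 2), block.getInsertionIndex(7));
        }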

        pub fn writeResData(self: *Block, compiler: *Compiler, language: res.Language, block_id: u16, writer: *std.Io.Writer) !void {
            var data_buffer: std.Io.Writer.Allocating = .init(compiler.allocator);
            defer data_buffer.deinit();
            const data_writer = &data_buffer.writer;

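            // Each RT_STRING resource holds exactly 16 strings. Every slot is written as a
            // u16 count of UTF-16 code units followed by the string data (no NUL terminator
            // unless the option below is set); unset slots get a count of 0.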
            var i: u8 = 0;
            var string_i: u8 = 0;
            while (true) : (i += 1) {
                if (!self.set_indexes.isSet(i)) {
                    try data_writer.writeInt(u16, 0, .little);
                    if (i == 15) break else continue;
                }

                const string_token = self.strings.items[string_i];
                const slice = string_token.slice(compiler.source);
                const column = string_token.calculateColumn(compiler.source, 8, null);
                const code_page = compiler.input_code_pages.getForToken(string_token);
                const bytes = SourceBytes{ .slice = slice, .code_page = code_page };
                const utf16_string = try literals.parseQuotedStringAsWideString(compiler.allocator, bytes, .{
                    .start_column = column,
                    .diagnostics = compiler.errContext(string_token),
                    .output_code_page = compiler.output_code_pages.getForToken(string_token),
                });
                defer compiler.allocator.free(utf16_string);

                const trimmed_string = trim: {
                    // Two NUL characters in a row act as a terminator
                    // Note: This is only the case for STRINGTABLE strings
                    const trimmed = trimToDoubleNUL(u16, utf16_string);
                    // We also want to trim any trailing NUL characters
                    break :trim std.mem.trimEnd(u16, trimmed, &[_]u16{0});
                };

                // String literals are limited to maxInt(u15) codepoints, so these UTF-16 encoded
                // strings are limited to maxInt(u15) * 2 = 65,534 code units (since 2 is the
                // maximum number of UTF-16 code units per codepoint).
                // This leaves room for exactly one NUL terminator.
                var string_len_in_utf16_code_units: u16 = @intCast(trimmed_string.len);
                // If the option is set, then a NUL terminator is added unconditionally.
                // We already trimmed any trailing NULs, so we know it will be a new addition to the string.
                if (compiler.null_terminate_string_table_strings) string_len_in_utf16_code_units += 1;
                try data_writer.writeInt(u16, string_len_in_utf16_code_units, .little);
                try data_writer.writeAll(std.mem.sliceAsBytes(trimmed_string));
                if (compiler.null_terminate_string_table_strings) {
                    try data_writer.writeInt(u16, 0, .little);
                }

                if (i == 15) break;
                string_i += 1;
            }

            // This intCast will never be able to fail due to the length constraints on string literals.
            //
            // - STRINGTABLE resource definitions can only provide one string literal per index.
            // - STRINGTABLE strings are limited to maxInt(u16) UTF-16 code units (see 'string_len_in_utf16_code_units'
            //   above), which means that the maximum number of bytes per string literal is
            //   2 * maxInt(u16) = 131,070 (since there are 2 bytes per UTF-16 code unit).
            // - Each Block/RT_STRING resource includes exactly 16 strings and each has a 2-byte
            //   length field, so the maximum number of total bytes in a RT_STRING resource's data is
            //   16 * (131,070 + 2) = 2,097,152 which is well within the u32 max.
            //
            // Note: The string literal maximum length is enforced by the lexer.
            const data_size: u32 = @intCast(data_buffer.written().len);

            const header = Compiler.ResourceHeader{
                .name_value = .{ .ordinal = block_id },
                .type_value = .{ .ordinal = @intFromEnum(res.RT.STRING) },
                .memory_flags = self.memory_flags,
                .language = language,
                .version = self.version,
                .characteristics = self.characteristics,
                .data_size = data_size,
            };
            // The only variable parts of the header are name and type, which in this case
            // we fully control and know are numbers, so they have a fixed size.
            try header.writeAssertNoOverflow(writer);

            var data_fbs: std.Io.Reader = .fixed(data_buffer.written());
            try Compiler.writeResourceData(writer, &data_fbs, data_size);
        }
    };

    pub fn deinit(self: *StringTable, allocator: Allocator) void {
        var it = self.blocks.iterator();
        while (it.next()) |entry| {
            entry.value_ptr.strings.deinit(allocator);
        }
        self.blocks.deinit(allocator);
    }

    const SetError = error{StringAlreadyDefined} || Allocator.Error;

    pub fn set(
        self: *StringTable,
        allocator: Allocator,
        id: u16,
        string_token: Token,
        node: *Node,
        source: []const u8,
        code_page_lookup: *const CodePageLookup,
        version: u32,
        characteristics: u32,
    ) SetError!void {
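        // Strings are grouped 16 per block, and block IDs are 1-based; e.g. string ID 17
        // maps to block 2, index 1.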
        const block_id = (id / 16) + 1;
        const string_index: u8 = @intCast(id & 0xF);

        var get_or_put_result = try self.blocks.getOrPut(allocator, block_id);
        if (!get_or_put_result.found_existing) {
            get_or_put_result.value_ptr.* = Block{ .version = version, .characteristics = characteristics };
            get_or_put_result.value_ptr.applyAttributes(node.cast(.string_table).?, source, code_page_lookup);
        } else {
            if (get_or_put_result.value_ptr.set_indexes.isSet(string_index)) {
                return error.StringAlreadyDefined;
            }
        }

        var block = get_or_put_result.value_ptr;
        if (block.getInsertionIndex(string_index)) |insertion_index| {
            try block.strings.insert(allocator, insertion_index, string_token);
        } else {
            try block.strings.append(allocator, string_token);
        }
        block.set_indexes.set(string_index);
    }

    pub fn get(self: *StringTable, id: u16) ?Token {
        const block_id = (id / 16) + 1;
        const string_index: u8 = @intCast(id & 0xF);

        const block = self.blocks.getPtr(block_id) orelse return null;
        const token_index = block.getTokenIndex(string_index) orelse return null;
        return block.strings.items[token_index];
    }

    pub fn dump(self: *StringTable) !void {
        var it = self.iterator();
        while (it.next()) |entry| {
            std.debug.print("block: {}\n", .{entry.key_ptr.*});
            entry.value_ptr.dump();
        }
    }
};

test "StringTable" {
    const S = struct {
        fn makeDummyToken(id: usize) Token {
            return Token{
                .id = .invalid,
                .start = id,
                .end = id,
                .line_number = id,
            };
        }
    };
    const allocator = std.testing.allocator;
    var string_table = StringTable{};
    defer string_table.deinit(allocator);

    var code_page_lookup = CodePageLookup.init(allocator, .windows1252);
    defer code_page_lookup.deinit();

    var dummy_node = Node.StringTable{
        .type = S.makeDummyToken(0),
        .common_resource_attributes = &.{},
        .optional_statements = &.{},
        .begin_token = S.makeDummyToken(0),
        .strings = &.{},
        .end_token = S.makeDummyToken(0),
    };

    // randomize an array of ids 0-99
    var ids = ids: {
        var buf: [100]u16 = undefined;
        var i: u16 = 0;
        while (i < buf.len) : (i += 1) {
            buf[i] = i;
        }
        break :ids buf;
    };
    var prng = std.Random.DefaultPrng.init(0);
    var random = prng.random();
    random.shuffle(u16, &ids);

    // set each one in the randomized order
    for (ids) |id| {
        try string_table.set(allocator, id, S.makeDummyToken(id), &dummy_node.base, "", &code_page_lookup, 0, 0);
    }

    // make sure each one exists and is the right value when gotten
    var id: u16 = 0;
    while (id < 100) : (id += 1) {
        const dummy = S.makeDummyToken(id);
        try std.testing.expectError(error.StringAlreadyDefined, string_table.set(allocator, id, dummy, &dummy_node.base, "", &code_page_lookup, 0, 0));
        try std.testing.expectEqual(dummy, string_table.get(id).?);
    }

    // make sure non-existent string ids are not found
    try std.testing.expectEqual(@as(?Token, null), string_table.get(100));
}