file: std.Io.File,
flags: packed struct {
    block_size: std.mem.Alignment,
    copy_file_range_unsupported: bool,
    fallocate_punch_hole_unsupported: bool,
    fallocate_insert_range_unsupported: bool,
},
section: if (is_windows) windows.HANDLE else void,
contents: []align(std.heap.page_size_min) u8,
nodes: std.ArrayList(Node),
free_ni: Node.Index,
large: std.ArrayList(u64),
updates: std.ArrayList(Node.Index),
update_prog_node: std.Progress.Node,
writers: std.SinglyLinkedList,

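/// Denominator of the spare capacity reserved on growth: resizes and remaps
/// request `n + n/growth_factor` bytes, so e.g. a 100-byte request reserves
/// 125 bytes, amortizing repeated small grows.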
pub const growth_factor = 4;

pub const Error = std.posix.MMapError || std.posix.MRemapError || std.fs.File.SetEndPosError || error{
    NotFile,
    SystemResources,
    IsDir,
    Unseekable,
    NoSpaceLeft,
};

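/// Maps `file` for read and write access and creates the root node spanning
/// the entire file. Usage sketch (illustrative, not from this file; assumes
/// `file` is an open, writable regular file and `gpa` is any allocator):
///
///     var mf: MappedFile = try .init(file, gpa);
///     defer mf.deinit(gpa);
///     const ni = try mf.addOnlyChildNode(gpa, .root, .{ .size = 16 });
///     @memcpy(ni.slice(&mf)[0..5], "hello");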
pub fn init(file: std.Io.File, gpa: std.mem.Allocator) !MappedFile {
    var mf: MappedFile = .{
        .file = file,
        .flags = undefined,
        .section = if (is_windows) windows.INVALID_HANDLE_VALUE else {},
        .contents = &.{},
        .nodes = .empty,
        .free_ni = .none,
        .large = .empty,
        .updates = .empty,
        .update_prog_node = .none,
        .writers = .{},
    };
    errdefer mf.deinit(gpa);
    const size: u64, const block_size = stat: {
        if (is_windows) {
            var sbi: windows.SYSTEM_BASIC_INFORMATION = undefined;
            break :stat .{
                try windows.GetFileSizeEx(file.handle),
                switch (windows.ntdll.NtQuerySystemInformation(
                    .SystemBasicInformation,
                    &sbi,
                    @sizeOf(windows.SYSTEM_BASIC_INFORMATION),
                    null,
                )) {
                    .SUCCESS => @max(sbi.PageSize, sbi.AllocationGranularity),
                    else => std.heap.page_size_max,
                },
            };
        }
        const stat = try std.posix.fstat(mf.file.handle);
        if (!std.posix.S.ISREG(stat.mode)) return error.NotFile;
        break :stat .{ @bitCast(stat.size), @max(std.heap.pageSize(), stat.blksize) };
    };
    mf.flags = .{
        .block_size = .fromByteUnits(std.math.ceilPowerOfTwoAssert(usize, block_size)),
        .copy_file_range_unsupported = false,
        .fallocate_insert_range_unsupported = false,
        .fallocate_punch_hole_unsupported = false,
    };
    try mf.nodes.ensureUnusedCapacity(gpa, 1);
    assert(try mf.addNode(gpa, .{
        .add_node = .{
            .size = size,
            .alignment = mf.flags.block_size,
            .fixed = true,
        },
    }) == Node.Index.root);
    try mf.ensureTotalCapacity(@intCast(size));
    return mf;
}

pub fn deinit(mf: *MappedFile, gpa: std.mem.Allocator) void {
    mf.unmap();
    mf.nodes.deinit(gpa);
    mf.large.deinit(gpa);
    mf.updates.deinit(gpa);
    mf.update_prog_node.end();
    assert(mf.writers.first == null);
    mf.* = undefined;
}

pub const Node = extern struct {
    parent: Node.Index,
    prev: Node.Index,
    next: Node.Index,
    first: Node.Index,
    last: Node.Index,
    flags: Flags,
    location_payload: Location.Payload,

    pub const Flags = packed struct(u32) {
        location_tag: Location.Tag,
        alignment: std.mem.Alignment,
        /// Whether this node is fixed in place rather than free to be relocated.
        fixed: bool,
        /// Whether this node has been moved.
        moved: bool,
        /// Whether this node has been resized.
        resized: bool,
        /// Whether this node might contain non-zero bytes.
        has_content: bool,
        /// Whether a moved event on this node bubbles down to children.
        bubbles_moved: bool,
        unused: @Int(.unsigned, 32 - @bitSizeOf(std.mem.Alignment) - 6) = 0,
    };

    pub const Location = union(enum(u1)) {
        small: extern struct {
            /// Relative to `parent`.
            offset: u32,
            size: u32,
        },
        large: extern struct {
            index: usize,
            unused: @Int(.unsigned, 64 - @bitSizeOf(usize)) = 0,
        },

        pub const Tag = @typeInfo(Location).@"union".tag_type.?;
        pub const Payload = extern union {
            small: @FieldType(Location, "small"),
            large: @FieldType(Location, "large"),
        };

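        /// Returns `.{ offset, size }`; large locations are stored
        /// out-of-line in `mf.large`. Sketch:
        ///
        ///     const offset, const size = ni.location(mf).resolve(mf);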
        pub fn resolve(loc: Location, mf: *const MappedFile) [2]u64 {
            return switch (loc) {
                .small => |small| .{ small.offset, small.size },
                .large => |large| mf.large.items[large.index..][0..2].*,
            };
        }
    };

    pub const FileLocation = struct {
        offset: u64,
        size: u64,

        pub fn end(fl: FileLocation) u64 {
            return fl.offset + fl.size;
        }
    };

    pub const Index = enum(u32) {
        none,
        _,

        pub const root: Node.Index = .none;

        fn get(ni: Node.Index, mf: *const MappedFile) *Node {
            return &mf.nodes.items[@intFromEnum(ni)];
        }

        pub fn parent(ni: Node.Index, mf: *const MappedFile) Node.Index {
            return ni.get(mf).parent;
        }

        pub fn ChildIterator(comptime direction: enum { prev, next }) type {
            return struct {
                mf: *const MappedFile,
                ni: Node.Index,
                pub fn next(it: *@This()) ?Node.Index {
                    const ni = it.ni;
                    if (ni == .none) return null;
                    it.ni = @field(ni.get(it.mf), @tagName(direction));
                    return ni;
                }
            };
        }
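        /// Iterates this node's children from `first` to `last`. Sketch:
        ///
        ///     var it = parent_ni.children(mf);
        ///     while (it.next()) |child_ni| { ... }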
        pub fn children(ni: Node.Index, mf: *const MappedFile) ChildIterator(.next) {
            return .{ .mf = mf, .ni = ni.get(mf).first };
        }
        pub fn reverseChildren(ni: Node.Index, mf: *const MappedFile) ChildIterator(.prev) {
            return .{ .mf = mf, .ni = ni.get(mf).last };
        }

        pub fn childrenMoved(ni: Node.Index, gpa: std.mem.Allocator, mf: *MappedFile) !void {
            var child_ni = ni.get(mf).last;
            while (child_ni != .none) {
                try child_ni.moved(gpa, mf);
                child_ni = child_ni.get(mf).prev;
            }
        }

        pub fn hasMoved(ni: Node.Index, mf: *const MappedFile) bool {
            var parent_ni = ni;
            while (parent_ni != Node.Index.root) {
                const parent_node = parent_ni.get(mf);
                if (!parent_node.flags.bubbles_moved) break;
                if (parent_node.flags.moved) return true;
                parent_ni = parent_node.parent;
            }
            return false;
        }
        pub fn moved(ni: Node.Index, gpa: std.mem.Allocator, mf: *MappedFile) !void {
            try mf.updates.ensureUnusedCapacity(gpa, 1);
            ni.movedAssumeCapacity(mf);
        }
        pub fn cleanMoved(ni: Node.Index, mf: *const MappedFile) bool {
            const node_moved = &ni.get(mf).flags.moved;
            defer node_moved.* = false;
            return node_moved.*;
        }
        pub fn movedAssumeCapacity(ni: Node.Index, mf: *MappedFile) void {
            if (ni.hasMoved(mf)) return;
            const node = ni.get(mf);
            node.flags.moved = true;
            if (node.flags.resized) return;
            mf.updates.appendAssumeCapacity(ni);
            mf.update_prog_node.increaseEstimatedTotalItems(1);
        }

        pub fn hasResized(ni: Node.Index, mf: *const MappedFile) bool {
            return ni.get(mf).flags.resized;
        }
        pub fn resized(ni: Node.Index, gpa: std.mem.Allocator, mf: *MappedFile) !void {
            try mf.updates.ensureUnusedCapacity(gpa, 1);
            ni.resizedAssumeCapacity(mf);
        }
        pub fn cleanResized(ni: Node.Index, mf: *const MappedFile) bool {
            const node_resized = &ni.get(mf).flags.resized;
            defer node_resized.* = false;
            return node_resized.*;
        }
        pub fn resizedAssumeCapacity(ni: Node.Index, mf: *MappedFile) void {
            const node = ni.get(mf);
            if (node.flags.resized) return;
            node.flags.resized = true;
            if (node.flags.moved) return;
            mf.updates.appendAssumeCapacity(ni);
            mf.update_prog_node.increaseEstimatedTotalItems(1);
        }

        pub fn alignment(ni: Node.Index, mf: *const MappedFile) std.mem.Alignment {
            return ni.get(mf).flags.alignment;
        }

        fn setLocationAssumeCapacity(ni: Node.Index, mf: *MappedFile, offset: u64, size: u64) void {
            const node = ni.get(mf);
            if (size == 0) node.flags.has_content = false;
            switch (node.location()) {
                .small => |small| {
                    if (small.offset != offset) ni.movedAssumeCapacity(mf);
                    if (small.size != size) ni.resizedAssumeCapacity(mf);
                    if (std.math.cast(u32, offset)) |small_offset| {
                        if (std.math.cast(u32, size)) |small_size| {
                            node.location_payload.small = .{
                                .offset = small_offset,
                                .size = small_size,
                            };
                            return;
                        }
                    }
                    defer mf.large.appendSliceAssumeCapacity(&.{ offset, size });
                    node.flags.location_tag = .large;
                    node.location_payload = .{ .large = .{ .index = mf.large.items.len } };
                },
                .large => |large| {
                    const large_items = mf.large.items[large.index..][0..2];
                    if (large_items[0] != offset) ni.movedAssumeCapacity(mf);
                    if (large_items[1] != size) ni.resizedAssumeCapacity(mf);
                    large_items.* = .{ offset, size };
                },
            }
        }

        pub fn location(ni: Node.Index, mf: *const MappedFile) Location {
            return ni.get(mf).location();
        }

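        /// Resolves this node's absolute location within the file by summing
        /// relative offsets up the parent chain. When `set_has_content` is
        /// true, every node on that chain is also marked as possibly
        /// containing non-zero bytes. Sketch:
        ///
        ///     const loc = ni.fileLocation(mf, false);
        ///     const bytes = mf.contents[@intCast(loc.offset)..][0..@intCast(loc.size)];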
        pub fn fileLocation(
            ni: Node.Index,
            mf: *const MappedFile,
            set_has_content: bool,
        ) FileLocation {
            var offset, const size = ni.location(mf).resolve(mf);
            var parent_ni = ni;
            while (true) {
                const parent_node = parent_ni.get(mf);
                if (set_has_content) parent_node.flags.has_content = true;
                if (parent_ni == .none) break;
                parent_ni = parent_node.parent;
                const parent_offset, _ = parent_ni.location(mf).resolve(mf);
                offset += parent_offset;
            }
            return .{ .offset = offset, .size = size };
        }

        pub fn slice(ni: Node.Index, mf: *const MappedFile) []u8 {
            const file_loc = ni.fileLocation(mf, true);
            return mf.contents[@intCast(file_loc.offset)..][0..@intCast(file_loc.size)];
        }

        pub fn sliceConst(ni: Node.Index, mf: *const MappedFile) []const u8 {
            const file_loc = ni.fileLocation(mf, false);
            return mf.contents[@intCast(file_loc.offset)..][0..@intCast(file_loc.size)];
        }

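        /// Resizes this node to at least `size` bytes, growing ancestors and
        /// shifting or relocating sibling nodes as needed, then rebases the
        /// buffers of all registered writers so they keep pointing into the
        /// possibly-remapped contents. Sketch: `try ni.resize(mf, gpa, 4096);`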
        pub fn resize(ni: Node.Index, mf: *MappedFile, gpa: std.mem.Allocator, size: u64) !void {
            try mf.resizeNode(gpa, ni, size);
            var writers_it = mf.writers.first;
            while (writers_it) |writer_node| : (writers_it = writer_node.next) {
                const w: *Node.Writer = @fieldParentPtr("writer_node", writer_node);
                w.interface.buffer = w.ni.slice(mf);
            }
        }

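        /// Initializes `w` as a `std.Io.Writer` backed by this node's mapped
        /// bytes; writes past the end grow the node on demand. Sketch:
        ///
        ///     var w: Node.Writer = undefined;
        ///     ni.writer(mf, gpa, &w);
        ///     defer w.deinit();
        ///     try w.interface.writeAll("data");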
        pub fn writer(ni: Node.Index, mf: *MappedFile, gpa: std.mem.Allocator, w: *Writer) void {
            w.* = .{
                .gpa = gpa,
                .mf = mf,
                .writer_node = .{},
                .ni = ni,
                .interface = .{
                    .buffer = ni.slice(mf),
                    .vtable = &Writer.vtable,
                },
                .err = null,
            };
            mf.writers.prepend(&w.writer_node);
        }
    };

    pub fn location(node: *const Node) Location {
        return switch (node.flags.location_tag) {
            inline else => |tag| @unionInit(
                Location,
                @tagName(tag),
                @field(node.location_payload, @tagName(tag)),
            ),
        };
    }

    pub const Writer = struct {
        gpa: std.mem.Allocator,
        mf: *MappedFile,
        writer_node: std.SinglyLinkedList.Node,
        ni: Node.Index,
        interface: std.Io.Writer,
        err: ?Error,

        pub fn deinit(w: *Writer) void {
            assert(w.mf.writers.popFirst() == &w.writer_node);
            w.* = undefined;
        }

        const vtable: std.Io.Writer.VTable = .{
            .drain = drain,
            .sendFile = sendFile,
            .flush = std.Io.Writer.noopFlush,
            .rebase = growingRebase,
        };

        fn drain(
            interface: *std.Io.Writer,
            data: []const []const u8,
            splat: usize,
        ) std.Io.Writer.Error!usize {
            assert(data.len != 0);
            const pattern = data[data.len - 1];
            const splat_len = pattern.len * splat;
            const start_len = interface.end;
            for (data) |bytes| {
                try growingRebase(interface, interface.end, bytes.len + splat_len + 1);
                @memcpy(interface.buffer[interface.end..][0..bytes.len], bytes);
                interface.end += bytes.len;
            }
            if (splat == 0) {
                interface.end -= pattern.len;
            } else switch (pattern.len) {
                0 => {},
                1 => {
                    @memset(interface.buffer[interface.end..][0 .. splat - 1], pattern[0]);
                    interface.end += splat - 1;
                },
                else => for (0..splat - 1) |_| {
                    @memcpy(interface.buffer[interface.end..][0..pattern.len], pattern);
                    interface.end += pattern.len;
                },
            }
            return interface.end - start_len;
        }

        fn sendFile(
            interface: *std.Io.Writer,
            file_reader: *std.Io.File.Reader,
            limit: std.Io.Limit,
        ) std.Io.Writer.FileError!usize {
            if (limit == .nothing) return 0;
            const pos = file_reader.logicalPos();
            const additional = if (file_reader.getSize()) |size| size - pos else |_| std.atomic.cache_line;
            if (additional == 0) return error.EndOfStream;
            try growingRebase(interface, interface.end, limit.minInt64(additional));
            switch (file_reader.mode) {
                .positional => {
                    const fr_buf = file_reader.interface.buffered();
                    if (fr_buf.len > 0) {
                        const n = interface.write(fr_buf) catch unreachable;
                        file_reader.interface.toss(n);
                        return n;
                    }
                    const w: *Writer = @fieldParentPtr("interface", interface);
                    const n: usize = @intCast(w.mf.copyFileRange(
                        file_reader.file,
                        file_reader.pos,
                        w.ni.fileLocation(w.mf, true).offset + interface.end,
                        limit.minInt(interface.unusedCapacityLen()),
                    ) catch |err| {
                        w.err = err;
                        return error.WriteFailed;
                    });
                    if (n == 0) return error.Unimplemented;
                    file_reader.pos += n;
                    interface.end += n;
                    return n;
                },
                .streaming,
                .streaming_reading,
                .positional_reading,
                .failure,
                => {
                    const dest = limit.slice(interface.unusedCapacitySlice());
                    const n = try file_reader.interface.readSliceShort(dest);
                    if (n == 0) return error.EndOfStream;
                    interface.end += n;
                    return n;
                },
            }
        }

        fn growingRebase(
            interface: *std.Io.Writer,
            preserve: usize,
            unused_capacity: usize,
        ) std.Io.Writer.Error!void {
            _ = preserve;
            const total_capacity = interface.end + unused_capacity;
            if (interface.buffer.len >= total_capacity) return;
            const w: *Writer = @fieldParentPtr("interface", interface);
            w.ni.resize(w.mf, w.gpa, total_capacity +| total_capacity / growth_factor) catch |err| {
                w.err = err;
                return error.WriteFailed;
            };
        }
    };

    comptime {
        if (!std.debug.runtime_safety) std.debug.assert(@sizeOf(Node) == 32);
    }
};

fn addNode(mf: *MappedFile, gpa: std.mem.Allocator, opts: struct {
    parent: Node.Index = .none,
    prev: Node.Index = .none,
    next: Node.Index = .none,
    offset: u64 = 0,
    add_node: AddNodeOptions,
}) !Node.Index {
    if (opts.add_node.moved or opts.add_node.resized) try mf.updates.ensureUnusedCapacity(gpa, 1);
    const offset = opts.add_node.alignment.forward(@intCast(opts.offset));
    const location_tag: Node.Location.Tag, const location_payload: Node.Location.Payload = location: {
        if (std.math.cast(u32, offset)) |small_offset| break :location .{ .small, .{
            .small = .{ .offset = small_offset, .size = 0 },
        } };
        try mf.large.ensureUnusedCapacity(gpa, 2);
        defer mf.large.appendSliceAssumeCapacity(&.{ offset, 0 });
        break :location .{ .large, .{ .large = .{ .index = mf.large.items.len } } };
    };
    const free_ni: Node.Index, const free_node = free: switch (mf.free_ni) {
        .none => .{ @enumFromInt(mf.nodes.items.len), mf.nodes.addOneAssumeCapacity() },
        else => |free_ni| {
            const free_node = free_ni.get(mf);
            mf.free_ni = free_node.next;
            break :free .{ free_ni, free_node };
        },
    };
    switch (opts.prev) {
        .none => opts.parent.get(mf).first = free_ni,
        else => |prev_ni| prev_ni.get(mf).next = free_ni,
    }
    switch (opts.next) {
        .none => opts.parent.get(mf).last = free_ni,
        else => |next_ni| next_ni.get(mf).prev = free_ni,
    }
    free_node.* = .{
        .parent = opts.parent,
        .prev = opts.prev,
        .next = opts.next,
        .first = .none,
        .last = .none,
        .flags = .{
            .location_tag = location_tag,
            .alignment = opts.add_node.alignment,
            .fixed = opts.add_node.fixed,
            .moved = true,
            .resized = true,
            .has_content = false,
            .bubbles_moved = opts.add_node.bubbles_moved,
        },
        .location_payload = location_payload,
    };
    {
        defer {
            free_node.flags.moved = false;
            free_node.flags.resized = false;
        }
        _, const parent_size = opts.parent.location(mf).resolve(mf);
        if (offset > parent_size) try opts.parent.resize(mf, gpa, offset);
        try free_ni.resize(mf, gpa, opts.add_node.size);
    }
    if (opts.add_node.moved) free_ni.movedAssumeCapacity(mf);
    if (opts.add_node.resized) free_ni.resizedAssumeCapacity(mf);
    return free_ni;
}

pub const AddNodeOptions = struct {
    size: u64 = 0,
    alignment: std.mem.Alignment = .@"1",
    fixed: bool = false,
    moved: bool = false,
    resized: bool = false,
    bubbles_moved: bool = true,
};

pub fn addOnlyChildNode(
    mf: *MappedFile,
    gpa: std.mem.Allocator,
    parent_ni: Node.Index,
    opts: AddNodeOptions,
) !Node.Index {
    try mf.nodes.ensureUnusedCapacity(gpa, 1);
    const parent = parent_ni.get(mf);
    assert(parent.first == .none and parent.last == .none);
    return mf.addNode(gpa, .{
        .parent = parent_ni,
        .add_node = opts,
    });
}

pub fn addFirstChildNode(
    mf: *MappedFile,
    gpa: std.mem.Allocator,
    parent_ni: Node.Index,
    opts: AddNodeOptions,
) !Node.Index {
    try mf.nodes.ensureUnusedCapacity(gpa, 1);
    const parent = parent_ni.get(mf);
    return mf.addNode(gpa, .{
        .parent = parent_ni,
        .next = parent.first,
        .add_node = opts,
    });
}

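/// Appends a new child after `parent_ni`'s current last child, placing it at
/// the first suitably aligned offset past that child. Sketch (names
/// illustrative):
///
///     const ni = try mf.addLastChildNode(gpa, .root, .{
///         .size = 64,
///         .alignment = .@"8",
///     });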
pub fn addLastChildNode(
    mf: *MappedFile,
    gpa: std.mem.Allocator,
    parent_ni: Node.Index,
    opts: AddNodeOptions,
) !Node.Index {
    try mf.nodes.ensureUnusedCapacity(gpa, 1);
    const parent = parent_ni.get(mf);
    return mf.addNode(gpa, .{
        .parent = parent_ni,
        .prev = parent.last,
        .offset = offset: switch (parent.last) {
            .none => 0,
            else => |last_ni| {
                const last_offset, const last_size = last_ni.location(mf).resolve(mf);
                break :offset last_offset + last_size;
            },
        },
        .add_node = opts,
    });
}

pub fn addNodeAfter(
    mf: *MappedFile,
    gpa: std.mem.Allocator,
    prev_ni: Node.Index,
    opts: AddNodeOptions,
) !Node.Index {
    assert(prev_ni != .none);
    try mf.nodes.ensureUnusedCapacity(gpa, 1);
    const prev = prev_ni.get(mf);
    const prev_offset, const prev_size = prev.location().resolve(mf);
    return mf.addNode(gpa, .{
        .parent = prev.parent,
        .prev = prev_ni,
        .next = prev.next,
        .offset = prev_offset + prev_size,
        .add_node = opts,
    });
}

fn resizeNode(mf: *MappedFile, gpa: std.mem.Allocator, ni: Node.Index, requested_size: u64) !void {
    const node = ni.get(mf);
    const old_offset, const old_size = node.location().resolve(mf);
    const new_size = node.flags.alignment.forward(@intCast(requested_size));
    // Resize the entire file
    if (ni == Node.Index.root) {
        try mf.ensureCapacityForSetLocation(gpa);
        try std.fs.File.adaptFromNewApi(mf.file).setEndPos(new_size);
        try mf.ensureTotalCapacity(@intCast(new_size));
        ni.setLocationAssumeCapacity(mf, old_offset, new_size);
        return;
    }
    const parent = node.parent.get(mf);
    _, var old_parent_size = parent.location().resolve(mf);
    const trailing_end = trailing_end: switch (node.next) {
        .none => old_parent_size,
        else => |next_ni| {
            const next_offset, _ = next_ni.location(mf).resolve(mf);
            break :trailing_end next_offset;
        },
    };
    assert(old_offset + old_size <= trailing_end);
    if (old_offset + new_size <= trailing_end) {
        // Expand the node into trailing free space
        try mf.ensureCapacityForSetLocation(gpa);
        ni.setLocationAssumeCapacity(mf, old_offset, new_size);
        return;
    }
    if (is_linux and !mf.flags.fallocate_insert_range_unsupported and
        node.flags.alignment.order(mf.flags.block_size).compare(.gte))
    insert_range: {
        // Ask the filesystem driver to insert extents into the file without copying any data
        const last_offset, const last_size = parent.last.location(mf).resolve(mf);
        const last_end = last_offset + last_size;
        assert(last_end <= old_parent_size);
        const range_file_offset = ni.fileLocation(mf, false).offset + old_size;
        const range_size = node.flags.alignment.forward(
            @intCast(requested_size +| requested_size / growth_factor),
        ) - old_size;
        _, const file_size = Node.Index.root.location(mf).resolve(mf);
        while (true) switch (linux.errno(switch (std.math.order(range_file_offset, file_size)) {
            .lt => linux.fallocate(
                mf.file.handle,
                linux.FALLOC.FL_INSERT_RANGE,
                @intCast(range_file_offset),
                @intCast(range_size),
            ),
            .eq => linux.ftruncate(mf.file.handle, @intCast(range_file_offset + range_size)),
            .gt => unreachable,
        })) {
            .SUCCESS => {
                var enclosing_ni = ni;
                while (true) {
                    try mf.ensureCapacityForSetLocation(gpa);
                    const enclosing = enclosing_ni.get(mf);
                    const enclosing_offset, const old_enclosing_size =
                        enclosing.location().resolve(mf);
                    const new_enclosing_size = old_enclosing_size + range_size;
                    enclosing_ni.setLocationAssumeCapacity(mf, enclosing_offset, new_enclosing_size);
                    if (enclosing_ni == Node.Index.root) {
                        assert(enclosing_offset == 0);
                        try mf.ensureTotalCapacity(@intCast(new_enclosing_size));
                        break;
                    }
                    var after_ni = enclosing.next;
                    while (after_ni != .none) {
                        try mf.ensureCapacityForSetLocation(gpa);
                        const after = after_ni.get(mf);
                        const after_offset, const after_size = after.location().resolve(mf);
                        after_ni.setLocationAssumeCapacity(
                            mf,
                            range_size + after_offset,
                            after_size,
                        );
                        after_ni = after.next;
                    }
                    enclosing_ni = enclosing.parent;
                }
                return;
            },
            .INTR => continue,
            .BADF, .FBIG, .INVAL => unreachable,
            .IO => return error.InputOutput,
            .NODEV => return error.NotFile,
            .NOSPC => return error.NoSpaceLeft,
            .NOSYS, .OPNOTSUPP => {
                mf.flags.fallocate_insert_range_unsupported = true;
                break :insert_range;
            },
            .PERM => return error.PermissionDenied,
            .SPIPE => return error.Unseekable,
            .TXTBSY => return error.FileBusy,
            else => |e| return std.posix.unexpectedErrno(e),
        };
    }
    if (node.next == .none) {
        // As this is the last node, we simply need more space in the parent
        const new_parent_size = old_offset + new_size;
        try mf.resizeNode(gpa, node.parent, new_parent_size +| new_parent_size / growth_factor);
        try mf.ensureCapacityForSetLocation(gpa);
        ni.setLocationAssumeCapacity(mf, old_offset, new_size);
        return;
    }
    if (!node.flags.fixed) {
        // Make space at the end of the parent for this floating node
        const last = parent.last.get(mf);
        const last_offset, const last_size = last.location().resolve(mf);
        const new_offset = node.flags.alignment.forward(@intCast(last_offset + last_size));
        const new_parent_size = new_offset + new_size;
        if (new_parent_size > old_parent_size)
            try mf.resizeNode(gpa, node.parent, new_parent_size +| new_parent_size / growth_factor);
        try mf.ensureCapacityForSetLocation(gpa);
        const next_ni = node.next;
        next_ni.get(mf).prev = node.prev;
        switch (node.prev) {
            .none => parent.first = next_ni,
            else => |prev_ni| prev_ni.get(mf).next = next_ni,
        }
        last.next = ni;
        node.prev = parent.last;
        node.next = .none;
        parent.last = ni;
        if (node.flags.has_content) {
            const parent_file_offset = node.parent.fileLocation(mf, false).offset;
            try mf.moveRange(
                parent_file_offset + old_offset,
                parent_file_offset + new_offset,
                old_size,
            );
        }
        ni.setLocationAssumeCapacity(mf, new_offset, new_size);
        return;
    }
    // Search for the first floating node following this fixed node
    var last_fixed_ni = ni;
    var first_floating_ni = node.next;
    var shift = new_size - old_size;
    var direction: enum { forward, reverse } = .forward;
    while (true) {
        assert(last_fixed_ni != .none);
        const last_fixed = last_fixed_ni.get(mf);
        assert(last_fixed.flags.fixed);
        const old_last_fixed_offset, const last_fixed_size = last_fixed.location().resolve(mf);
        const new_last_fixed_offset = old_last_fixed_offset + shift;
        make_space: switch (first_floating_ni) {
            else => {
                const first_floating = first_floating_ni.get(mf);
                const old_first_floating_offset, const first_floating_size =
                    first_floating.location().resolve(mf);
                assert(old_last_fixed_offset + last_fixed_size <= old_first_floating_offset);
                if (new_last_fixed_offset + last_fixed_size <= old_first_floating_offset)
                    break :make_space;
                assert(direction == .forward);
                if (first_floating.flags.fixed) {
                    shift = first_floating.flags.alignment.forward(@intCast(
                        @max(shift, first_floating_size),
                    ));
                    // Not enough space, try the next node
                    last_fixed_ni = first_floating_ni;
                    first_floating_ni = first_floating.next;
                    continue;
                }
                // Move the found floating node to make space for preceding fixed nodes
                const last = parent.last.get(mf);
                const last_offset, const last_size = last.location().resolve(mf);
                const new_first_floating_offset = first_floating.flags.alignment.forward(
                    @intCast(@max(new_last_fixed_offset + last_fixed_size, last_offset + last_size)),
                );
                const new_parent_size = new_first_floating_offset + first_floating_size;
                if (new_parent_size > old_parent_size) {
                    try mf.resizeNode(
                        gpa,
                        node.parent,
                        new_parent_size +| new_parent_size / growth_factor,
                    );
                    _, old_parent_size = parent.location().resolve(mf);
                }
                try mf.ensureCapacityForSetLocation(gpa);
                if (parent.last != first_floating_ni) {
                    first_floating.prev = parent.last;
                    parent.last = first_floating_ni;
                    last.next = first_floating_ni;
                    last_fixed.next = first_floating.next;
                    switch (first_floating.next) {
                        .none => {},
                        else => |next_ni| next_ni.get(mf).prev = last_fixed_ni,
                    }
                    first_floating.next = .none;
                }
                if (first_floating.flags.has_content) {
                    const parent_file_offset =
                        node.parent.fileLocation(mf, false).offset;
                    try mf.moveRange(
                        parent_file_offset + old_first_floating_offset,
                        parent_file_offset + new_first_floating_offset,
                        first_floating_size,
                    );
                }
                first_floating_ni.setLocationAssumeCapacity(
                    mf,
                    new_first_floating_offset,
                    first_floating_size,
                );
                // Continue the search after the just-moved floating node
                first_floating_ni = last_fixed.next;
                continue;
            },
            .none => {
                assert(direction == .forward);
                const new_parent_size = new_last_fixed_offset + last_fixed_size;
                if (new_parent_size > old_parent_size) {
                    try mf.resizeNode(
                        gpa,
                        node.parent,
                        new_parent_size +| new_parent_size / growth_factor,
                    );
                    _, old_parent_size = parent.location().resolve(mf);
                }
            },
        }
        try mf.ensureCapacityForSetLocation(gpa);
        if (last_fixed_ni == ni) {
            // The original fixed node now has enough space
            last_fixed_ni.setLocationAssumeCapacity(
                mf,
                old_last_fixed_offset,
                last_fixed_size + shift,
            );
            return;
        }
        // Move a fixed node into trailing free space
        if (last_fixed.flags.has_content) {
            const parent_file_offset = node.parent.fileLocation(mf, false).offset;
            try mf.moveRange(
                parent_file_offset + old_last_fixed_offset,
                parent_file_offset + new_last_fixed_offset,
                last_fixed_size,
            );
        }
        last_fixed_ni.setLocationAssumeCapacity(mf, new_last_fixed_offset, last_fixed_size);
        // Retry the previous nodes now that there is enough space
        first_floating_ni = last_fixed_ni;
        last_fixed_ni = last_fixed.prev;
        direction = .reverse;
    }
}

fn moveRange(mf: *MappedFile, old_file_offset: u64, new_file_offset: u64, size: u64) !void {
    // Make a copy of the range at the new location
    try mf.copyRange(old_file_offset, new_file_offset, size);
    // Delete the copy of the range at the old location
    if (is_linux and !mf.flags.fallocate_punch_hole_unsupported and
        size >= mf.flags.block_size.toByteUnits() * 2 - 1) while (true)
        switch (linux.errno(linux.fallocate(
            mf.file.handle,
            linux.FALLOC.FL_PUNCH_HOLE | linux.FALLOC.FL_KEEP_SIZE,
            @intCast(old_file_offset),
            @intCast(size),
        ))) {
            .SUCCESS => return,
            .INTR => continue,
            .BADF, .FBIG, .INVAL => unreachable,
            .IO => return error.InputOutput,
            .NODEV => return error.NotFile,
            .NOSPC => return error.NoSpaceLeft,
            .NOSYS, .OPNOTSUPP => {
                mf.flags.fallocate_punch_hole_unsupported = true;
                break;
            },
            .PERM => return error.PermissionDenied,
            .SPIPE => return error.Unseekable,
            .TXTBSY => return error.FileBusy,
            else => |e| return std.posix.unexpectedErrno(e),
        };
    @memset(mf.contents[@intCast(old_file_offset)..][0..@intCast(size)], 0);
}

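/// Copies `size` bytes within the file, letting `copyFileRange` handle as
/// much as the kernel supports and falling back to `@memcpy` through the
/// mapping for the remainder.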
fn copyRange(mf: *MappedFile, old_file_offset: u64, new_file_offset: u64, size: u64) !void {
    const copy_size = try mf.copyFileRange(mf.file, old_file_offset, new_file_offset, size);
    if (copy_size < size) @memcpy(
        mf.contents[@intCast(new_file_offset + copy_size)..][0..@intCast(size - copy_size)],
        mf.contents[@intCast(old_file_offset + copy_size)..][0..@intCast(size - copy_size)],
    );
}

fn copyFileRange(
    mf: *MappedFile,
    old_file: std.Io.File,
    old_file_offset: u64,
    new_file_offset: u64,
    size: u64,
) !u64 {
    var remaining_size = size;
    if (is_linux and !mf.flags.copy_file_range_unsupported) {
        var old_file_offset_mut: i64 = @intCast(old_file_offset);
        var new_file_offset_mut: i64 = @intCast(new_file_offset);
        while (remaining_size >= mf.flags.block_size.toByteUnits() * 2 - 1) {
            const copy_len = linux.copy_file_range(
                old_file.handle,
                &old_file_offset_mut,
                mf.file.handle,
                &new_file_offset_mut,
                @intCast(remaining_size),
                0,
            );
            switch (linux.errno(copy_len)) {
                .SUCCESS => {
                    if (copy_len == 0) break;
                    remaining_size -= copy_len;
                    if (remaining_size == 0) break;
                },
                .INTR => continue,
                .BADF, .FBIG, .INVAL, .OVERFLOW => unreachable,
                .IO => return error.InputOutput,
                .ISDIR => return error.IsDir,
                .NOMEM => return error.SystemResources,
                .NOSPC => return error.NoSpaceLeft,
                .NOSYS, .OPNOTSUPP, .XDEV => {
                    mf.flags.copy_file_range_unsupported = true;
                    break;
                },
                .PERM => return error.PermissionDenied,
                .TXTBSY => return error.FileBusy,
                else => |e| return std.posix.unexpectedErrno(e),
            }
        }
    }
    return size - remaining_size;
}

fn ensureCapacityForSetLocation(mf: *MappedFile, gpa: std.mem.Allocator) !void {
    try mf.large.ensureUnusedCapacity(gpa, 2);
    try mf.updates.ensureUnusedCapacity(gpa, 1);
}

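/// Grows the mapping with `growth_factor` headroom so repeated small growth
/// stays cheap: e.g. a request for 4096 bytes maps 4096 + 4096/4 = 5120
/// bytes (rounded up to the block size by `ensureTotalCapacityPrecise`).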
pub fn ensureTotalCapacity(mf: *MappedFile, new_capacity: usize) !void {
    if (mf.contents.len >= new_capacity) return;
    try mf.ensureTotalCapacityPrecise(new_capacity +| new_capacity / growth_factor);
}

pub fn ensureTotalCapacityPrecise(mf: *MappedFile, new_capacity: usize) !void {
    if (mf.contents.len >= new_capacity) return;
    const aligned_capacity = mf.flags.block_size.forward(new_capacity);
    if (!is_linux) mf.unmap() else if (mf.contents.len > 0) {
        mf.contents = try std.posix.mremap(
            mf.contents.ptr,
            mf.contents.len,
            aligned_capacity,
            .{ .MAYMOVE = true },
            null,
        );
        return;
    }
    if (is_windows) {
        if (mf.section == windows.INVALID_HANDLE_VALUE) switch (windows.ntdll.NtCreateSection(
            &mf.section,
            windows.STANDARD_RIGHTS_REQUIRED | windows.SECTION_QUERY |
                windows.SECTION_MAP_WRITE | windows.SECTION_MAP_READ | windows.SECTION_EXTEND_SIZE,
            null,
            @constCast(&@as(i64, @intCast(aligned_capacity))),
            windows.PAGE_READWRITE,
            windows.SEC_COMMIT,
            mf.file.handle,
        )) {
            .SUCCESS => {},
            else => return error.MemoryMappingNotSupported,
        };
        var contents_ptr: ?[*]align(std.heap.page_size_min) u8 = null;
        var contents_len = aligned_capacity;
        switch (windows.ntdll.NtMapViewOfSection(
            mf.section,
            windows.GetCurrentProcess(),
            @ptrCast(&contents_ptr),
            null,
            0,
            null,
            &contents_len,
            .ViewUnmap,
            0,
            windows.PAGE_READWRITE,
        )) {
            .SUCCESS => mf.contents = contents_ptr.?[0..contents_len],
            else => return error.MemoryMappingNotSupported,
        }
    } else mf.contents = try std.posix.mmap(
        null,
        aligned_capacity,
        std.posix.PROT.READ | std.posix.PROT.WRITE,
        .{ .TYPE = if (is_linux) .SHARED_VALIDATE else .SHARED },
        mf.file.handle,
        0,
    );
}

pub fn unmap(mf: *MappedFile) void {
    if (mf.contents.len == 0) return;
    if (is_windows)
        _ = windows.ntdll.NtUnmapViewOfSection(windows.GetCurrentProcess(), mf.contents.ptr)
    else
        std.posix.munmap(mf.contents);
    mf.contents = &.{};
    if (is_windows and mf.section != windows.INVALID_HANDLE_VALUE) {
        windows.CloseHandle(mf.section);
        mf.section = windows.INVALID_HANDLE_VALUE;
    }
}

fn verify(mf: *MappedFile) void {
    const root = Node.Index.root.get(mf);
    assert(root.parent == .none);
    assert(root.prev == .none);
    assert(root.next == .none);
    mf.verifyNode(Node.Index.root);
}

fn verifyNode(mf: *MappedFile, parent_ni: Node.Index) void {
    const parent = parent_ni.get(mf);
    const parent_offset, const parent_size = parent.location().resolve(mf);
    var prev_ni: Node.Index = .none;
    var prev_end: u64 = 0;
    var ni = parent.first;
    while (true) {
        if (ni == .none) {
            assert(parent.last == prev_ni);
            return;
        }
        const node = ni.get(mf);
        assert(node.parent == parent_ni);
        const offset, const size = node.location().resolve(mf);
        assert(node.flags.alignment.check(@intCast(offset)));
        assert(node.flags.alignment.check(@intCast(size)));
        const end = offset + size;
        assert(end <= parent_offset + parent_size);
        assert(offset >= prev_end);
        assert(node.prev == prev_ni);
        mf.verifyNode(ni);
        prev_ni = ni;
        prev_end = end;
        ni = node.next;
    }
}

const assert = std.debug.assert;
const builtin = @import("builtin");
const is_linux = builtin.os.tag == .linux;
const is_windows = builtin.os.tag == .windows;
const linux = std.os.linux;
const MappedFile = @This();
const std = @import("std");
const windows = std.os.windows;