Commit b2b0bf0506
Changed files (5)
lib
std
fs
lib/std/fs/file.zig
@@ -652,25 +652,26 @@ pub const File = struct {
pub const CopyRangeError = os.CopyFileRangeError;
- pub fn copyRange(in: File, in_offset: u64, out: File, out_offset: u64, len: u64) CopyRangeError!usize {
+ pub fn copyRange(in: File, in_offset: u64, out: File, out_offset: u64, len: u64) CopyRangeError!u64 {
const adjusted_len = math.cast(usize, len) catch math.maxInt(usize);
- return os.copy_file_range(in.handle, in_offset, out.handle, out_offset, adjusted_len, 0);
+ const result = try os.copy_file_range(in.handle, in_offset, out.handle, out_offset, adjusted_len, 0);
+ return result;
}
/// Returns the number of bytes copied. If the number copied is smaller than `len`, it
/// means the in file reached the end. Reaching the end of a file is not an error condition.
- pub fn copyRangeAll(in: File, in_offset: u64, out: File, out_offset: u64, len: u64) CopyRangeError!usize {
+ pub fn copyRangeAll(in: File, in_offset: u64, out: File, out_offset: u64, len: u64) CopyRangeError!u64 {
var total_bytes_copied: u64 = 0;
var in_off = in_offset;
var out_off = out_offset;
while (total_bytes_copied < len) {
- const amt_copied = try copyRange(in, in_off, out, out_off, len);
- if (amt_copied == 0) return @intCast(usize, total_bytes_copied);
+ const amt_copied = try copyRange(in, in_off, out, out_off, len - total_bytes_copied);
+ if (amt_copied == 0) return total_bytes_copied;
total_bytes_copied += amt_copied;
in_off += amt_copied;
out_off += amt_copied;
}
- return @intCast(usize, total_bytes_copied);
+ return total_bytes_copied;
}
pub const WriteFileOptions = struct {
src/link/Elf.zig
@@ -2757,7 +2757,7 @@ fn writeSymbol(self: *Elf, index: usize) !void {
if (needed_size > self.allocatedSize(syms_sect.sh_offset)) {
// Move all the symbols to a new file location.
const new_offset = self.findFreeSpace(needed_size, sym_align);
- const existing_size = syms_sect.sh_info * sym_size;
+ const existing_size = @as(u64, syms_sect.sh_info) * sym_size;
const amt = try self.base.file.?.copyRangeAll(syms_sect.sh_offset, self.base.file.?, new_offset, existing_size);
if (amt != existing_size) return error.InputOutput;
syms_sect.sh_offset = new_offset;
@@ -2990,7 +2990,7 @@ fn pwriteDbgInfoNops(
buf: []const u8,
next_padding_size: usize,
trailing_zero: bool,
- offset: usize,
+ offset: u64,
) !void {
const tracy = trace(@src());
defer tracy.end();
src/link/MachO.zig
@@ -1264,6 +1264,10 @@ fn updateString(self: *MachO, old_str_off: u32, new_name: []const u8) !u32 {
return self.makeString(new_name);
}
+/// TODO This should not heap allocate, instead it should utilize a fixed size, statically allocated
+/// global const array. You could even use pwritev to write the same buffer multiple times with only
+/// 1 syscall if you needed to, for example, write 8192 bytes using a buffer of only 4096 bytes.
+/// This size parameter should probably be a usize not u64.
fn addPadding(self: *MachO, size: u64, file_offset: u64) !void {
if (size == 0) return;
src/codegen.zig
@@ -32,7 +32,7 @@ pub const BlockData = struct {
/// comptime assert that makes sure we guessed correctly about the size. This only
/// exists so that we can bitcast an arch-independent field to and from the real MCValue.
pub const AnyMCValue = extern struct {
- a: u64,
+ a: usize,
b: u64,
};
src/main.zig
@@ -2541,7 +2541,7 @@ fn fmtPathFile(
check_mode: bool,
dir: fs.Dir,
sub_path: []const u8,
-) (FmtError || error{Overflow})!void {
+) FmtError!void {
const source_file = try dir.openFile(sub_path, .{});
var file_closed = false;
errdefer if (!file_closed) source_file.close();
@@ -2554,7 +2554,7 @@ fn fmtPathFile(
const source_code = source_file.readToEndAllocOptions(
fmt.gpa,
max_src_size,
- try std.math.cast(usize, stat.size),
+ std.math.cast(usize, stat.size) catch return error.FileTooBig,
@alignOf(u8),
null,
) catch |err| switch (err) {